1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * OpenSPARC T2 Processor File: niagara2.c | |
5 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
6 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES. | |
7 | * | |
8 | * The above named program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public | |
10 | * License version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * The above named program is distributed in the hope that it will be | |
13 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public | |
18 | * License along with this work; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. | |
20 | * | |
21 | * ========== Copyright Header End ============================================ | |
22 | */ | |
23 | /* | |
24 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. | |
25 | * Use is subject to license terms. | |
26 | */ | |
27 | #pragma ident "@(#)niagara2.c 1.79 07/10/12 SMI" | |
28 | ||
29 | #include <stdio.h> | |
30 | #include <stdlib.h> | |
31 | #include <unistd.h> | |
32 | #include <string.h> /* memcpy/memset */ | |
33 | #include <strings.h> | |
34 | #include <thread.h> | |
35 | ||
36 | #include "ss_common.h" | |
37 | #include "niagara2.h" | |
38 | #include "ss_hwtw.h" | |
39 | #include "pcie_device.h" | |
40 | #include "fpsim.h" | |
41 | ||
42 | #if INTERNAL_BUILD | |
43 | #include "stream_ma.h" | |
44 | #endif | |
45 | ||
46 | ||
47 | #ifdef FP_DECODE_DISABLED | |
48 | #define FP_DECODE_FPU_ON_CHECK \ | |
49 | if (!((sparcv9_cpu_t*)(sp->specificp))->fpu_on) goto n2_fp_disabled | |
50 | #else /* FP_DECODE_DISABLED */ | |
51 | #define FP_DECODE_FPU_ON_CHECK | |
52 | #endif /* FP_DECODE_DISABLED */ | |
53 | ||
54 | static void niagara2_init_trap_list(); | |
55 | static bool_t niagara2_init_proc_type(proc_type_t * proc_typep); | |
56 | static op_funcp niagara2_decode_me(simcpu_t *sp, xicache_instn_t * xcip, uint32_t instn); | |
57 | static void niagara2_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp); | |
58 | void niagara2_send_xirq(simcpu_t * sp, ss_proc_t * tnpp, uint64_t val); | |
59 | static uint64_t niagara2_ext_signal(config_proc_t * config_procp, ext_sig_t sigtype, void *vp); | |
60 | static bool_t ss_error_asi_noop_access(simcpu_t*, maccess_t, uint_t, uint_t, bool_t, tvaddr_t); | |
61 | static void niagara2_domain_check(domain_t *domainp); | |
62 | ||
63 | static void niagara2_init_trap_list() | |
64 | { | |
65 | /* | |
66 | * The SunSPARC traps are named with the prefix 'SS_', the N2 traps are prefixed with 'N2_'. | |
67 | * | |
68 | * The assignment of TT, priorities (both absolute and relative), and trap delivery mode | |
69 | * are based on N2 PRM, Rev. 1.0, 8/9/2005 | |
70 | */ | |
71 | static ss_trap_list_t setup_list[] = { | |
72 | ||
73 | /* Priorities 0 = highest, XX = Lowest */ | |
74 | /* Number Name Priority User Priv HPriv */ | |
75 | ||
76 | /* 0x00 */ { T( legion_save_state ), Pri( 0, 0), H, H, H }, | |
77 | /* 0x01 */ { T( power_on_reset ), Pri( 0, 0), H, H, H }, | |
78 | /* 0x02 */ { T( watchdog_reset ), Pri( 1, 2), H, H, H }, | |
79 | /* 0x03 */ { T( externally_initiated_reset ), Pri( 1, 1), H, H, H }, | |
80 | /* 0x04 */ { T( software_initiated_reset ), Pri( 1, 3), H, H, H }, | |
81 | /* 0x05 */ { T( RED_state_exception ), Pri( 1, 4), H, H, H }, | |
82 | ||
83 | /* 0x07 */ { TN2( store_error ), Pri( 2, 1), H, H, H }, | |
84 | /* 0x08 */ { T( IAE_privilege_violation ), Pri( 3, 1), H, X, X }, | |
85 | /* 0x09 */ { T( instruction_access_MMU_miss ), Pri( 2, 8), H, H, X }, | |
86 | /* 0x0a */ { T( instruction_access_error ), Pri( 4, 0), H, H, H }, | |
87 | ||
88 | /* 0x0b */ { T( IAE_unauth_access ), Pri( 2, 9), H, H, X }, | |
89 | /* 0x0c */ { T( IAE_NFO_page ), Pri( 3, 3), H, H, X }, | |
90 | /* 0x0d */ { TN2( instruction_address_range ), Pri( 2, 6), H, H, UH }, | |
91 | /* 0x0d */ { TN2( instruction_real_range ), Pri( 2, 6), H, H, UH }, | |
92 | ||
93 | /* 0x10 */ { T( illegal_instruction ), Pri( 6, 1), H, H, H }, | |
94 | /* 0x11 */ { T( privileged_opcode ), Pri( 7, 0), P, X, X }, | |
95 | ||
96 | /* LDD and STD are in fact implemented by niagara */ | |
97 | /* 0x12 */ { T( unimplemented_LDD ), Pri( 6, 0), X, X, X }, /* error if received by hypervisor. */ | |
98 | /* 0x13 */ { T( unimplemented_STD ), Pri( 6, 0), X, X, X }, /* error if received by hypervisor. */ | |
99 | ||
100 | /* 0x14 */ { T( DAE_invalid_ASI ), Pri(12, 1), H, H, UH }, | |
101 | /* 0x15 */ { T( DAE_privilege_violation ), Pri(12, 4), H, H, UH }, | |
102 | /* 0x16 */ { T( DAE_nc_page ), Pri(12, 5), H, H, UH }, | |
103 | /* 0x17 */ { T( DAE_NFO_page ), Pri(12, 6), H, H, UH }, | |
104 | ||
105 | ||
106 | /* 0x20 */ { T( fp_disabled ), Pri( 8, 1), P, P, UH }, /* error if received by hypervisor. */ | |
107 | /* 0x21 */ { T( fp_exception_ieee_754 ), Pri(11, 1), P, P, UH }, /* error if received by hypervisor. */ | |
108 | /* 0x22 */ { T( fp_exception_other ), Pri(11, 1), P, P, UH }, /* error if received by hypervisor. */ | |
109 | /* 0x23 */ { T( tag_overflow ), Pri(14, 0), P, P, UH }, /* error if received by hypervisor. */ | |
110 | /* 0x24 */ { T( clean_window ), Pri(10, 1), P, P, UH }, /* error if received by hypervisor - windows not used. */ | |
111 | ||
112 | /* 0x28 */ { T( division_by_zero ), Pri(15, 0), P, P, UH }, /* error if received by hypervisor. */ | |
113 | /* 0x29 */ { T( internal_processor_error ), Pri( 8, 2), H, H, H }, /* generated by register parity errors */ | |
114 | ||
115 | /* 0x2a */ { T( instruction_invalid_TSB_entry ), Pri( 2, 10), H, H, X }, | |
116 | /* 0x2b */ { T( data_invalid_TSB_entry ), Pri(12, 3), H, H, H }, | |
117 | /* 0x2d */ { TN2( mem_real_range ), Pri(11, 3), H, H, UH }, | |
118 | /* 0x2e */ { TN2( mem_address_range ), Pri(11, 3), H, H, UH }, | |
119 | ||
120 | /* 0x30 */ { T( DAE_so_page ), Pri(12, 6), H, H, UH }, | |
121 | /* 0x31 */ { T( data_access_MMU_miss ), Pri(12, 3), H, H, H }, | |
122 | /* 0x32 */ { T( data_access_error ), Pri(12, 9), H, H, H }, /* handle error and generate report to appropriate supervisor. */ | |
123 | ||
124 | /* 0x34 */ { T( mem_address_not_aligned ), Pri(10, 2), H, H, UH }, /* error if received by hypervisor. */ | |
125 | /* 0x35 */ { T( LDDF_mem_address_not_aligned ), Pri(10, 1), H, H, UH }, /* error if received by hypervisor. */ | |
126 | /* 0x36 */ { T( STDF_mem_address_not_aligned ), Pri(10, 1), H, H, UH }, /* error if received by hypervisor. */ | |
127 | /* 0x37 */ { T( privileged_action ), Pri(11, 1), H, H, X }, /* error if received from hypervisor. */ | |
128 | /* 0x38 */ { T( LDQF_mem_address_not_aligned ), Pri(10, 1), H, H, UH }, /* error if received by hypervisor. */ | |
129 | /* 0x39 */ { T( STQF_mem_address_not_aligned ), Pri(10, 1), H, H, UH }, /* error if received by hypervisor. */ | |
130 | ||
131 | /* 0x3b */ { TN2( unsupported_page_size ), Pri(13, 0), H, H, UH }, | |
132 | /* 0x3c */ { TN2( control_word_queue_interrupt ), Pri(16, 5), H, H, H }, | |
133 | /* 0x3d */ { TN2( modular_arithmetic_interrupt ), Pri(16, 4), H, H, H }, | |
134 | /* 0x3e */ { T( instruction_real_translation_miss ), Pri( 2, 8), H, H, X }, /* real to pa entry not found in ITLB */ | |
135 | /* 0x3f */ { T( data_real_translation_miss ), Pri(12, 3), H, H, H }, /* real to pa entry not found in DTLB */ | |
136 | ||
137 | /* is this one ever generated? */ | |
138 | /* 0x40 */ { T( sw_recoverable_error ), Pri(33, 1), H, H, H }, | |
139 | /* 0x41 */ { T( interrupt_level_1 ), Pri(31, 0), P, P, X }, | |
140 | /* 0x42 */ { T( interrupt_level_2 ), Pri(30, 0), P, P, X }, | |
141 | /* 0x43 */ { T( interrupt_level_3 ), Pri(29, 0), P, P, X }, | |
142 | /* 0x44 */ { T( interrupt_level_4 ), Pri(28, 0), P, P, X }, | |
143 | /* 0x45 */ { T( interrupt_level_5 ), Pri(27, 0), P, P, X }, | |
144 | /* 0x46 */ { T( interrupt_level_6 ), Pri(26, 0), P, P, X }, | |
145 | /* 0x47 */ { T( interrupt_level_7 ), Pri(25, 0), P, P, X }, | |
146 | /* 0x48 */ { T( interrupt_level_8 ), Pri(24, 0), P, P, X }, | |
147 | /* 0x49 */ { T( interrupt_level_9 ), Pri(23, 0), P, P, X }, | |
148 | /* 0x4a */ { T( interrupt_level_a ), Pri(22, 0), P, P, X }, | |
149 | /* 0x4b */ { T( interrupt_level_b ), Pri(21, 0), P, P, X }, | |
150 | /* 0x4c */ { T( interrupt_level_c ), Pri(20, 0), P, P, X }, | |
151 | /* 0x4d */ { T( interrupt_level_d ), Pri(19, 0), P, P, X }, | |
152 | /* 0x4e */ { T( interrupt_level_e ), Pri(18, 0), P, P, X }, | |
153 | /* 0x4f */ { T( interrupt_level_f ), Pri(17, 0), P, P, X }, | |
154 | ||
155 | /* 0x5e */ { T( hstick_match ), Pri(16, 1), H, H, H }, | |
156 | /* 0x5f */ { T( trap_level_zero ), Pri( 2, 2), H, H, X }, /* This trap requires TL==0, priv==1 and hpriv==0 */ | |
157 | ||
158 | /* 0x60 */ { T( interrupt_vector_trap ), Pri(16, 3), H, H, H }, /* handle & remap to sun4v as appropriate mondo queue */ | |
159 | /* 0x61 */ { T( RA_watchpoint ), Pri(12, 8), H, H, H }, | |
160 | /* 0x62 */ { T( VA_watchpoint ), Pri(11, 2), P, P, X }, /* error - VA watchpoints should be pended if hpriv=1 */ | |
161 | /* 0x63 */ { T( hw_corrected_error ), Pri(33, 2), H, H, H }, | |
162 | /* 0x64 */ { T( fast_instruction_access_MMU_miss ), Pri( 2, 8), H, H, X }, | |
163 | /* 0x68 */ { T( fast_data_access_MMU_miss ), Pri(12, 3), H, H, H }, | |
164 | /* 0x6c */ { T( fast_data_access_protection ), Pri(12, 7), H, H, H }, | |
165 | /* 0x71 */ { T( instruction_access_MMU_error ), Pri( 2, 7), H, H, X }, | |
166 | /* 0x72 */ { T( data_access_MMU_error ), Pri(12, 2), H, H, H }, | |
167 | /* 0x74 */ { T( control_transfer_instruction ), Pri(11, 1), P, P, H }, | |
168 | /* 0x75 */ { T( instruction_VA_watchpoint ), Pri( 2, 5), P, P, X }, | |
169 | /* 0x76 */ { T( instruction_breakpoint ), Pri( 6, 2), H, H, H }, | |
170 | /* 0x7c */ { T( cpu_mondo_trap ), Pri(16, 6), P, P, X }, | |
171 | /* 0x7d */ { T( dev_mondo_trap ), Pri(16, 7), P, P, X }, | |
172 | /* 0x7e */ { T( resumable_error ), Pri(33, 3), P, P, X }, | |
173 | /* faked by the hypervisor */ | |
174 | /* 0x7f */ { T( nonresumable_error ), Pri( 4, 0), SW, SW, SW }, | |
175 | ||
176 | /* 0x80 */ { T( spill_0_normal ), Pri( 9, 0), P, P, UH }, | |
177 | /* 0x84 */ { T( spill_1_normal ), Pri( 9, 0), P, P, UH }, | |
178 | /* 0x88 */ { T( spill_2_normal ), Pri( 9, 0), P, P, UH }, | |
179 | /* 0x8c */ { T( spill_3_normal ), Pri( 9, 0), P, P, UH }, | |
180 | /* 0x90 */ { T( spill_4_normal ), Pri( 9, 0), P, P, UH }, | |
181 | /* 0x94 */ { T( spill_5_normal ), Pri( 9, 0), P, P, UH }, | |
182 | /* 0x98 */ { T( spill_6_normal ), Pri( 9, 0), P, P, UH }, | |
183 | /* 0x9c */ { T( spill_7_normal ), Pri( 9, 0), P, P, UH }, | |
184 | ||
185 | /* 0xa0 */ { T( spill_0_other ), Pri( 9, 0), P, P, UH }, | |
186 | /* 0xa4 */ { T( spill_1_other ), Pri( 9, 0), P, P, UH }, | |
187 | /* 0xa8 */ { T( spill_2_other ), Pri( 9, 0), P, P, UH }, | |
188 | /* 0xac */ { T( spill_3_other ), Pri( 9, 0), P, P, UH }, | |
189 | /* 0xb0 */ { T( spill_4_other ), Pri( 9, 0), P, P, UH }, | |
190 | /* 0xb4 */ { T( spill_5_other ), Pri( 9, 0), P, P, UH }, | |
191 | /* 0xb8 */ { T( spill_6_other ), Pri( 9, 0), P, P, UH }, | |
192 | /* 0xbc */ { T( spill_7_other ), Pri( 9, 0), P, P, UH }, | |
193 | ||
194 | /* 0xc0 */ { T( fill_0_normal ), Pri( 9, 0), P, P, UH }, | |
195 | /* 0xc4 */ { T( fill_1_normal ), Pri( 9, 0), P, P, UH }, | |
196 | /* 0xc8 */ { T( fill_2_normal ), Pri( 9, 0), P, P, UH }, | |
197 | /* 0xcc */ { T( fill_3_normal ), Pri( 9, 0), P, P, UH }, | |
198 | /* 0xd0 */ { T( fill_4_normal ), Pri( 9, 0), P, P, UH }, | |
199 | /* 0xd4 */ { T( fill_5_normal ), Pri( 9, 0), P, P, UH }, | |
200 | /* 0xd8 */ { T( fill_6_normal ), Pri( 9, 0), P, P, UH }, | |
201 | /* 0xdc */ { T( fill_7_normal ), Pri( 9, 0), P, P, UH }, | |
202 | ||
203 | /* 0xe0 */ { T( fill_0_other ), Pri( 9, 0), P, P, UH }, | |
204 | /* 0xe4 */ { T( fill_1_other ), Pri( 9, 0), P, P, UH }, | |
205 | /* 0xe8 */ { T( fill_2_other ), Pri( 9, 0), P, P, UH }, | |
206 | /* 0xec */ { T( fill_3_other ), Pri( 9, 0), P, P, UH }, | |
207 | /* 0xf0 */ { T( fill_4_other ), Pri( 9, 0), P, P, UH }, | |
208 | /* 0xf4 */ { T( fill_5_other ), Pri( 9, 0), P, P, UH }, | |
209 | /* 0xf8 */ { T( fill_6_other ), Pri( 9, 0), P, P, UH }, | |
210 | /* 0xfc */ { T( fill_7_other ), Pri( 9, 0), P, P, UH }, | |
211 | ||
212 | /*0x100-0x17f*/{T( trap_instruction ), Pri( 16,2), P, P, H }, /* hv1: handles hypervisor traps only. Error if received from hypervisor. */ | |
213 | /*0x180-0x1ff*/{T( htrap_instruction ), Pri( 16,2), X, H, UH }, /* used to implement the supervisor to hypervisor API call. */ | |
214 | ||
215 | #undef T | |
216 | #undef TN1 | |
217 | #undef TN2 | |
218 | #undef TRK | |
219 | #undef X | |
220 | #undef SW | |
221 | #undef P | |
222 | #undef H | |
223 | #undef UH | |
224 | #undef Pri | |
225 | ||
226 | { -1, (char*)0 }, | |
227 | ||
228 | }; | |
229 | uint_t i; | |
230 | ||
231 | for (i=0; setup_list[i].trap_type != -1; i++) { | |
232 | ASSERT( setup_list[i].trap_type>=SS_trap_legion_save_state && setup_list[i].trap_type<SS_trap_illegal_value ); | |
233 | ||
234 | ss_trap_list[ setup_list[i].trap_type ] = setup_list[i]; | |
235 | } | |
236 | ||
237 | /* Now clone the trap instruction entries */ | |
238 | ||
239 | for (i=0x101; i<0x180; i++) { | |
240 | ss_trap_list[ i ] = ss_trap_list[ 0x100 ]; | |
241 | ss_trap_list[ i ].trap_type = i; | |
242 | } | |
243 | ||
244 | for (i=0x181; i<0x200; i++) { | |
245 | ss_trap_list[ i ] = ss_trap_list[ 0x180 ]; | |
246 | ss_trap_list[ i ].trap_type = i; | |
247 | } | |
248 | } | |
249 | ||
250 | ||
251 | #ifdef VFALLS /* { */ | |
252 | #define PROCESSOR_NAME "vfalls" | |
253 | #define PROCESSOR_TYPE proc_type_vfalls | |
254 | ||
255 | #else | |
256 | ||
257 | #define PROCESSOR_NAME "niagara2" | |
258 | #define PROCESSOR_TYPE proc_type_niagara2 | |
259 | ||
260 | #endif /* } VFALLS */ | |
261 | ||
262 | extern struct fpsim_functions fpsim_funclist; | |
263 | ||
264 | proc_type_t PROCESSOR_TYPE={ | |
265 | PROCESSOR_NAME, | |
266 | false, /* module initialised */ | |
267 | niagara2_init_proc_type, | |
268 | ||
269 | /* config support */ | |
270 | ss_parse, | |
271 | ss_init, | |
272 | ss_dump, | |
273 | ||
274 | /* execution support functions */ | |
275 | ss_dbgr_regread, | |
276 | ss_dbgr_regwrite, | |
277 | ss_exec_setup, | |
278 | ss_exec_cleanup, | |
279 | ss_save_state, | |
280 | ||
281 | ss_check_async_event, | |
282 | ss_take_exception, | |
283 | ||
284 | #if ERROR_INJECTION | |
285 | ss_error_condition, | |
286 | #endif | |
287 | #if ERROR_TRAP_GEN /* { */ | |
288 | trigger_error_trap, | |
289 | ss_error_reload_file, | |
290 | ss_error_dump_active, | |
291 | ss_error_dump_supported, | |
292 | #endif /* } */ | |
293 | n2_sp_interrupt, | |
294 | ||
295 | niagara2_decode_me, | |
296 | ||
297 | /* pointer to fpsim instructions */ | |
298 | &fpsim_funclist, | |
299 | ||
300 | /* performance measuring funcs */ | |
301 | sparcv9_perf_dump, | |
302 | ||
303 | /* dump tlb, instruction counts etc */ | |
304 | ss_dump_tlbs, | |
305 | ss_dump_instruction_counts, | |
306 | ||
307 | /* external interface methods */ | |
308 | niagara2_ext_signal, | |
309 | ss_get_cpuid, | |
310 | niagara2_get_pseudo_dev, | |
311 | ss_dev_mem_access, | |
312 | ||
313 | /* debugger interface methods */ | |
314 | ss_dbgr_attach, | |
315 | ss_dbgr_detach, | |
316 | ||
317 | ss_dbgr_mem_read, | |
318 | ss_dbgr_mem_write, | |
319 | ss_dbgr_mem_clear, | |
320 | ||
321 | ss_dbgr_set_break, | |
322 | ss_dbgr_clear_break, | |
323 | niagara2_domain_check, | |
324 | ||
325 | sparcv9_reg_map, | |
326 | ||
327 | NULL, /* debug_hookp */ | |
328 | NULL, /* debug_hook_dumpp */ | |
329 | ||
330 | CPU_MAGIC | |
331 | }; | |
332 | ||
333 | ||
334 | /* | |
335 | * Basic module init | |
336 | * | |
337 | * Returns false if error initialising module, true if init was OK | |
338 | */ | |
339 | bool_t niagara2_init_proc_type(proc_type_t * proctp) | |
340 | { | |
341 | if (proctp->flag_initialised) { | |
342 | warning("Initialisation of module %s more than once - bailing", proctp->proc_type_namep); | |
343 | return true; | |
344 | } | |
345 | ||
346 | /* stuff here we only need to do once if we want to use this module */ | |
347 | niagara2_init_trap_list(); | |
348 | ||
349 | proctp->flag_initialised = true; | |
350 | ||
351 | #if ERROR_INJECTION | |
352 | niagara2_init_error_list(); | |
353 | #endif | |
354 | return true; | |
355 | } | |
356 | ||
357 | ||
358 | /* | |
359 | * This function fills the PA<39:0> field of the TSB pointer registers with the | |
360 | * current data stored in the Tag access register and appropriate TSB config | |
361 | * register. See 13.11.12 of N2 PRM, rev 0.6, for more details. | |
362 | */ | |
363 | ||
364 | static uint64_t ss_make_tsb_pointer(tvaddr_t va, ss_tsb_info_t *tsb_config_regp) | |
365 | { | |
366 | uint64_t vpn, tte_addr; | |
367 | uint_t ps, n, shift, tte_idx; | |
368 | ||
369 | n = tsb_config_regp->tsb_size; | |
370 | ps = tsb_config_regp->page_size; | |
371 | ||
372 | shift = SUN4V_PAGE_OFFSET(ps); | |
373 | vpn = va & SUN4V_VPN_MASK(ps); | |
374 | tte_idx = (vpn & SUN4V_TTE_IDX_MASK(n,ps)) >> shift; | |
375 | tte_addr = tsb_config_regp->tsb_base | (tte_idx << 4); | |
376 | ||
377 | return (tte_addr); | |
378 | } | |
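/*
 * Worked example (illustrative only): assuming an 8 KB base page
 * (13-bit page offset) and a TSB whose index mask covers VA bits
 * 21:13 (i.e. 512 entries), a miss on va = 0x4a000 gives
 *
 *	tte_idx  = (va >> 13) & 0x1ff     = 0x25
 *	tte_addr = tsb_base | (0x25 << 4) = tsb_base + 0x250
 *
 * The shift by 4 reflects the 16-byte (tag word + data word) size of
 * each sun4v TSB entry.
 */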
379 | ||
380 | uint8_t * ss_make_tsb_pointer_int(tvaddr_t va, ss_tsb_info_t *tsb_config_regp) | |
381 | { | |
382 | uint64_t vpn; | |
383 | uint8_t * tte_addr; | |
384 | uint_t ps, n, shift, tte_idx; | |
385 | ||
386 | n = tsb_config_regp->tsb_size; | |
387 | ps = tsb_config_regp->page_size; | |
388 | ||
389 | shift = SUN4V_PAGE_OFFSET(ps); | |
390 | vpn = va & SUN4V_VPN_MASK(ps); | |
391 | tte_idx = (vpn & SUN4V_TTE_IDX_MASK(n,ps)) >> shift; | |
392 | tte_addr = tsb_config_regp->tsb_base_sim + (tte_idx << 4); | |
393 | ||
394 | return (tte_addr); | |
395 | } | |
396 | ||
397 | /* | |
398 | * Converts an RA to a PA during HW tablewalk based on the configuration of | |
399 | * the MMU Real Range and MMU Physical Offset registers. | |
400 | * | |
401 | * Returns true if a translation was found, false otherwise. | |
402 | */ | |
403 | bool_t ss_hwtw_convert_ra_to_pa(tvaddr_t ra, tvaddr_t * pa, ss_strand_t *nsp, uint_t ps) { | |
404 | ||
405 | bool_t rval = false; | |
406 | uint_t i; | |
407 | tvaddr_t tsb_lb_addr, tsb_ub_addr; | |
408 | uint64_t page_size = 1 << SUN4V_PAGE_SIZE_SHIFT(ps); | |
409 | ||
410 | /* | |
411 | * sanity check on bits 55:40 of the RA (must be zero) | |
412 | */ | |
413 | if ((ra >> (SUN4V_PN_UBIT+1)) != 0) | |
414 | return (false); | |
415 | ||
416 | /* | |
417 | * check if ra lies in one of four ranges specified by the range registers | |
418 | */ | |
419 | for (i=0; i<4; i++) { | |
420 | /* | |
421 | * check the enable bit of the real range register | |
422 | */ | |
423 | if (!(nsp->real_range_reg[i]>>63)) continue; | |
424 | ||
425 | tsb_lb_addr = (nsp->real_range_reg[i] & MASK64(26, 0)) << SUN4V_PN_LBIT; | |
426 | tsb_ub_addr = ((nsp->real_range_reg[i] & MASK64(53, 27)) >> 27) << SUN4V_PN_LBIT; | |
427 | if ((ra < tsb_lb_addr) || ((ra+page_size) > tsb_ub_addr)) continue; | |
428 | ||
429 | /* | |
430 | * translate RPN into PPN | |
431 | */ | |
432 | *pa = ra + (nsp->phy_off_reg[i] & SUN4V_PN_MASK); | |
433 | rval = true; | |
434 | break; | |
435 | } | |
436 | ||
437 | return (rval); | |
438 | } | |
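/*
 * Worked example (illustrative only): with real_range_reg[0] enabled
 * (bit 63 set), bits 26:0 give the lower bound and bits 53:27 the
 * upper bound, both scaled by SUN4V_PN_LBIT as above. An RA whose
 * page fits entirely inside that window is translated simply as
 *
 *	pa = ra + (phy_off_reg[0] & SUN4V_PN_MASK)
 *
 * An RA outside every enabled range makes this routine return false,
 * so the hardware tablewalk cannot supply a translation.
 */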
439 | ||
440 | ||
441 | /* | |
442 | * Implementation note: | |
443 | * | |
444 | * This function is almost identical to Niagara's ss_tlb_insert() except that it | |
445 | * | |
446 | * - works for the sun4v TTE format (the only format supported by N2). | |
447 | * - supports a lock-free TLB update (as opposed to the 'locked down' feature in N1) | |
448 | * - autodemaps pages of the same size or larger (N1 demaps pages of same, larger or smaller size) | |
449 | * - returns various TTE bits, packed in 'flags', and pa_offset to the TLB miss handler | |
450 | */ | |
451 | /* This is the encoding used for parity generation */ | |
452 | static uint8_t niagara2_size_encoding[8] = { 0, 1, 0, 3, 0, 7 }; | |
453 | ss_trap_type_t ss_tlb_insert(simcpu_t * sp, ss_mmu_t * mmup, ss_tlb_t * tlbp, | |
454 | uint_t partid, bool_t is_real, uint64_t data, uint_t *flags, uint64_t *pa_offset) | |
455 | { | |
456 | tlb_entry_t *tep; | |
457 | uint_t shift, size; | |
458 | tvaddr_t tag; | |
459 | uint16_t tag_context; | |
460 | matchcontext_t match_context; | |
461 | uint_t i; | |
462 | bool_t need_flush = false; | |
463 | ||
464 | data &= NIAGARA2_DATA_IN_MASK; | |
465 | ||
466 | /* | |
467 | * figure out the useful info about the page to insert | |
468 | */ | |
469 | size = SUN4V_TTED_PS(data); | |
470 | shift = SUN4V_PAGE_SIZE_SHIFT(size); | |
471 | if (!shift) | |
472 | return N2_trap_unsupported_page_size; | |
473 | ||
474 | if (tlbp->parity) { | |
475 | data |= xor_bits((data & | |
476 | ~((1ULL << SUN4V_TTED_V_BIT)|MASK64(3,0))) | | |
477 | niagara2_size_encoding[size]) << | |
478 | NIAGARA2_DATA_ACCESS_PAR_BIT; | |
479 | } | |
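	/*
	 * i.e. the parity bit is computed over the TTE data with the
	 * valid bit and the low four (size) bits masked out, combined
	 * with the size encoding from the table above; xor_bits() is
	 * assumed here to reduce its argument to a single parity bit.
	 */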
480 | ||
481 | /* | |
482 | * This is VERY important: | |
483 | * The tag access register need NOT contain a correctly aligned tag entry | |
484 | * for the given page size. So it is REALLY IMPORTANT when forming the TLB | |
485 | * entry tag field that we correctly mask off the lower bits corresponding to | |
486 | * the selected page size. This is especially important because we use this value to | |
487 | * compute a va-pa offset. | |
488 | * Note: we do a similar mask operation later when using the PA to compute the | |
489 | * offset value we create. | |
490 | */ | |
491 | tag = mmup->tag_access_reg & MASK64(63,shift); | |
492 | ||
493 | tag_context = is_real ? | |
494 | NIAGARA2_REAL_CONTEXT : mmup->tag_access_reg & MASK64(12,0); | |
495 | ||
496 | match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context; | |
497 | ||
498 | RW_wrlock(&tlbp->rwlock); | |
499 | ||
500 | /* | |
501 | * Do autodemap: demap any old TLB entry whose page size is | |
502 | * the same as or larger than that of the new entry. | |
503 | */ | |
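	/*
	 * An existing entry matches for autodemap when its page (given
	 * by match_shift) wholly contains the new tag and the context
	 * and partition ids are identical. For example, a new 8 KB
	 * mapping at va 0x42000 would demap an existing 64 KB entry
	 * covering 0x40000-0x4ffff in the same context/partition.
	 */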
504 | tep = &(tlbp->tlb_entryp[0]); | |
505 | for (i=tlbp->nentries; i>0; i--, tep++) { | |
506 | tvaddr_t xor; | |
507 | ||
508 | if (tep->hashidx == -1) | |
509 | continue; | |
510 | ||
511 | xor = tep->tag_pfn ^ tag; | |
512 | ||
513 | if (tep->match_shift >= shift && (xor>>tep->match_shift)==0 && | |
514 | tep->match_context == match_context && tep->partid == partid) { | |
515 | ||
516 | DBGMMU( lprintf(sp->gid, "ss_tlb_insert: autodemap %c-TLB: old sz=0x%x new sz=0x%x\n", | |
517 | mmup->is_immu ? 'I' : 'D', SUN4V_TTED_PS(tep->data), size); ); | |
518 | ||
519 | need_flush = true; | |
520 | /* | |
521 | * matching entry - put back on the free list | |
522 | */ | |
523 | ss_tlb_unhash(tlbp, tep); | |
524 | ss_free_tlb_entry( tlbp, tep ); | |
525 | ||
526 | #if ERROR_INJECTION | |
527 | tlb_entry_error_match(sp, mmup, tep); | |
528 | #endif | |
529 | } | |
530 | } | |
531 | ||
532 | /* | |
533 | * replace an entry chosen randomly or in a Round Robin fashion | |
534 | */ | |
535 | tep = tlbp->freep; | |
536 | if (tep == (tlb_entry_t*)0) { | |
537 | #if SS_TLB_REPLACE_RANDOM /* { */ | |
538 | i = random() % tlbp->nentries; | |
539 | tep = &(tlbp->tlb_entryp[i]); | |
540 | #elif SS_TLB_REPLACE_RROBIN /* } { */ | |
541 | i = tlbp->last_replaced + 1; | |
542 | if (i>=tlbp->nentries) i=0; /* wrap */ | |
543 | tep = &(tlbp->tlb_entryp[i]); | |
544 | tlbp->last_replaced = i; | |
545 | #else | |
546 | #error Need to define TLB replacement alg | |
547 | #endif /* } */ | |
548 | need_flush = true; | |
549 | /* put back on the free list */ | |
550 | ss_tlb_unhash(tlbp, tep); | |
551 | ss_free_tlb_entry( tlbp, tep ); | |
552 | tep = tlbp->freep; | |
553 | } | |
554 | ||
555 | /* | |
556 | * free entry must be invalid ! | |
557 | */ | |
558 | ASSERT(!SUN4V_TTED_V(tep->data)); | |
559 | ||
560 | tlbp->freep = tep->nextp; | |
561 | ||
562 | /* | |
563 | * create the new entry | |
564 | */ | |
565 | tep->is_real = is_real; | |
566 | tep->match_context = match_context; | |
567 | tep->partid = partid; | |
568 | tep->match_shift = shift; | |
569 | tep->tag_pfn = tag; | |
570 | tep->tag_context = tag_context; | |
571 | tep->data = data; | |
572 | ||
573 | /* | |
574 | * Note: variable size mask again based on page size | |
575 | */ | |
576 | tep->pa_offset = (data & MASK64(39,shift)) - tag; | |
577 | ||
578 | /* niagara2 doesn't have read and exec bits */ | |
579 | if (mmup->is_immu) | |
580 | tep->flags = SS_TLB_FLAG_EXEC; | |
581 | else | |
582 | tep->flags = SS_TLB_FLAG_READ; | |
583 | if (SUN4V_TTED_W(data)) tep->flags |= SS_TLB_FLAG_WRITE; | |
584 | if (SUN4V_TTED_P(data)) tep->flags |= SS_TLB_FLAG_PRIV; | |
585 | if (SUN4V_TTED_CP(data)) tep->flags |= SS_TLB_FLAG_CP; | |
586 | if (SUN4V_TTED_E(data)) tep->flags |= SS_TLB_FLAG_E; | |
587 | if (SUN4V_TTED_NFO(data))tep->flags |= SS_TLB_FLAG_NFO; | |
588 | if (SUN4V_TTED_IE(data)) tep->flags |= SS_TLB_FLAG_IE; | |
589 | ||
590 | /* Finally insert the new entry into the hash table for the TLB */ | |
591 | ||
592 | /* Hash uses match_context so it skews real->phys entries away from context 0 */ | |
593 | i = tag >> SS_MAX_PAGE_SIZE_BITS; | |
594 | i += match_context + partid; | |
595 | i &= SS_TLB_HASH_MASK; | |
596 | ||
597 | /* PRM notes that inserting with V = 0 operates like V = 1 */ | |
598 | if (SUN4V_TTED_V(data)) { | |
599 | tep->hashidx = i; /* to help with unhooking later */ | |
600 | ||
601 | tep->nextp = tlbp->hash[i].ptr; | |
602 | tlbp->hash[i].ptr = tep; | |
603 | } else { | |
604 | tep->nextp = tlbp->freep; | |
605 | tlbp->freep = tep; | |
606 | } | |
607 | ||
608 | DBGMMU(lprintf(sp->gid,"ss_tlb_insert: %c-TLB: sun4v tte=%llx [nfo=%d e=%d cp=%d p=%d ep=%d w=%d sz=0x%x]\n", | |
609 | mmup->is_immu ? 'I' : 'D', data, | |
610 | SUN4V_TTED_NFO(data), SUN4V_TTED_E(data), SUN4V_TTED_CP(data), SUN4V_TTED_P(data), | |
611 | SUN4V_TTED_EP(data), SUN4V_TTED_W(data), SUN4V_TTED_PS(data)); | |
612 | lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx shift=%d flags=0x%x\n", | |
613 | partid, tag, tag_context, match_context, tep->pa_offset, tep->match_shift,tep->flags); ); | |
614 | ||
615 | /* | |
616 | * return tep->flags and tep->pa_offset | |
617 | */ | |
618 | *flags = tep->flags; | |
619 | *pa_offset = tep->pa_offset; | |
620 | ||
621 | RW_unlock(&tlbp->rwlock); | |
622 | ||
623 | if (need_flush) { | |
624 | if (mmup->is_immu) | |
625 | sp->xicache_trans_flush_pending = true; | |
626 | else | |
627 | sp->xdcache_trans_flush_pending = true; | |
628 | if (tlbp->shares > 1) { | |
629 | ss_tlb_flush_shares(sp, tlbp, mmup->is_immu); | |
630 | } | |
631 | } | |
632 | ||
633 | return SS_trap_NONE; | |
634 | } | |
635 | ||
636 | ss_trap_type_t ss_tlb_insert_idx(simcpu_t * sp, ss_mmu_t * mmup, | |
637 | ss_tlb_t * tlbp, uint_t partid, bool_t is_real, uint64_t data, uint_t idx1) | |
638 | { | |
639 | tlb_entry_t *tep; | |
640 | uint_t shift, size; | |
641 | tvaddr_t tag; | |
642 | uint16_t tag_context; | |
643 | matchcontext_t match_context; | |
644 | uint_t i; | |
645 | uint_t idx; | |
646 | bool_t need_flush = false; | |
647 | ||
648 | /* | |
649 | * mask out bits which are ignored in Niagara2 HW. | |
650 | * Since this is a direct store to the TLB, the mask includes | |
651 | * additional bits. | |
652 | * TODO: confirm that parity is not generated for data access. | |
653 | */ | |
654 | uint64_t tte_data = data & NIAGARA2_DATA_ACCESS_MASK; | |
655 | ||
656 | /* | |
657 | * figure out the useful info about the page to insert | |
658 | */ | |
659 | size = SUN4V_TTED_PS(tte_data); | |
660 | shift = SUN4V_PAGE_SIZE_SHIFT(size); | |
661 | if (!shift) | |
662 | return N2_trap_unsupported_page_size; | |
663 | ||
664 | tag = mmup->tag_access_reg & MASK64(63,shift); | |
665 | ||
666 | tag_context = is_real ? | |
667 | NIAGARA2_REAL_CONTEXT : mmup->tag_access_reg & MASK64(12,0); | |
668 | match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context; | |
669 | ||
670 | /* | |
671 | * Hash uses match_context so it skews real->phys entries away | |
672 | * from context 0 | |
673 | */ | |
674 | ||
675 | idx = tag >> SS_MAX_PAGE_SIZE_BITS; | |
676 | idx += match_context + partid; | |
677 | idx &= SS_TLB_HASH_MASK; | |
678 | ||
679 | RW_wrlock(&tlbp->rwlock); | |
680 | ||
681 | tep = &(tlbp->tlb_entryp[idx1]); | |
682 | if (tep->hashidx != -1) { | |
683 | need_flush = true; | |
684 | ss_tlb_unhash(tlbp, tep); | |
685 | } else | |
686 | ss_tlb_unfree(tlbp, tep); | |
687 | ||
688 | /* overwrite the existing entry */ | |
689 | ||
690 | tep->is_real = is_real; | |
691 | tep->match_context = match_context; | |
692 | tep->partid = partid; | |
693 | tep->match_shift = shift; | |
694 | tep->tag_pfn = tag; | |
695 | tep->tag_context = tag_context; | |
696 | tep->data = tte_data; | |
697 | tep->pa_offset = (tte_data & MASK64(39,shift)) - tag; | |
698 | ||
699 | DBGMMU( lprintf(sp->gid, "ss_tlb_insert_idx: %c-TLB[0x%x]: sun4v tte=%llx " | |
700 | "[nfo=%d e=%d cp=%d p=%d ep=%d w=%d sz=0x%x]\n", | |
701 | mmup->is_immu ? 'I' : 'D', idx1, data, | |
702 | SUN4V_TTED_NFO(tte_data), SUN4V_TTED_E(tte_data), | |
703 | SUN4V_TTED_CP(tte_data), SUN4V_TTED_P(tte_data), | |
704 | SUN4V_TTED_EP(tte_data), SUN4V_TTED_W(tte_data), | |
705 | SUN4V_TTED_PS(tte_data)); | |
706 | lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx " | |
707 | "shift=%d flags=0x%x\n", | |
708 | partid, tag, tag_context, match_context, tep->pa_offset, | |
709 | tep->match_shift,tep->flags); ); | |
710 | ||
711 | /* niagara2 doesn't have read and exec bits */ | |
712 | if (mmup->is_immu) | |
713 | tep->flags = SS_TLB_FLAG_EXEC; | |
714 | else | |
715 | tep->flags = SS_TLB_FLAG_READ; | |
716 | if (SUN4V_TTED_W(tte_data)) tep->flags |= SS_TLB_FLAG_WRITE; | |
717 | if (SUN4V_TTED_P(tte_data)) tep->flags |= SS_TLB_FLAG_PRIV; | |
718 | if (SUN4V_TTED_CP(tte_data)) tep->flags |= SS_TLB_FLAG_CP; | |
719 | if (SUN4V_TTED_E(tte_data)) tep->flags |= SS_TLB_FLAG_E; | |
720 | if (SUN4V_TTED_NFO(tte_data))tep->flags |= SS_TLB_FLAG_NFO; | |
721 | if (SUN4V_TTED_IE(tte_data)) tep->flags |= SS_TLB_FLAG_IE; | |
722 | ||
723 | /* | |
724 | * Finally re-insert the entry into the hash table for the TLB, | |
725 | * if valid. Direct writes may write any bit pattern. | |
726 | */ | |
727 | ||
728 | if (SUN4V_TTED_V(tte_data)) { | |
729 | tep->hashidx = idx; /* to help with unhooking later */ | |
730 | ||
731 | tep->nextp = tlbp->hash[idx].ptr; | |
732 | tlbp->hash[idx].ptr = tep; | |
733 | } else { | |
734 | tep->nextp = tlbp->freep; | |
735 | tlbp->freep = tep; | |
736 | } | |
737 | ||
738 | RW_unlock(&tlbp->rwlock); | |
739 | ||
740 | if (need_flush) { | |
741 | if (mmup->is_immu) | |
742 | sp->xicache_trans_flush_pending = true; | |
743 | else | |
744 | sp->xdcache_trans_flush_pending = true; | |
745 | if (tlbp->shares > 1) { | |
746 | ss_tlb_flush_shares(sp, tlbp, mmup->is_immu); | |
747 | } | |
748 | } | |
749 | ||
750 | return SS_trap_NONE; | |
751 | } | |
752 | ||
753 | ||
754 | /* | |
755 | * Fill in the TSB config register with the given data. | |
756 | */ | |
757 | void niagara2_write_tsb_config(simcpu_t *sp, ss_tsb_info_t *tsb_config_reg, uint64_t data) | |
758 | { | |
759 | uint_t shift; | |
760 | ||
761 | tsb_config_reg->data = data; | |
762 | tsb_config_reg->enable = ((data >> 63)&1) ? true : false; | |
763 | tsb_config_reg->use_context_0 = ((data >> 62)&1) ? true : false; | |
764 | tsb_config_reg->use_context_1 = ((data >> 61)&1) ? true : false; | |
765 | tsb_config_reg->ra_not_pa = ((data >> 8)&1) ? true : false; | |
766 | /* MASK should be (7, 4) and check for unsupported page size */ | |
767 | tsb_config_reg->page_size = (data & MASK64(6, 4)) >> 4; | |
768 | shift = SUN4V_PAGE_SIZE_SHIFT(tsb_config_reg->page_size); | |
769 | if (shift > 22) | |
770 | tsb_config_reg->tag_match_shift = shift - 22; | |
771 | else | |
772 | tsb_config_reg->tag_match_shift = 0; | |
773 | tsb_config_reg->tsb_size = data & MASK64(3, 0); | |
774 | tsb_config_reg->tsb_base = data & | |
775 | SUN4V_TSB_BASE_MASK(tsb_config_reg->tsb_size); | |
776 | if (tsb_config_reg->enable) | |
777 | tsb_config_reg->tsb_base_sim = ss_hwtw_find_base(sp, tsb_config_reg); | |
778 | else | |
779 | tsb_config_reg->tsb_base_sim = NULL; | |
780 | } | |
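/*
 * Decoding example (illustrative only): a value of 0x8000000010000012
 * gives enable=1 (bit 63), both shared-context bits clear, ra_not_pa=0
 * (bit 8), page_size=1 from bits 6:4 (64 KB under the usual sun4v
 * encoding) and tsb_size=2 from bits 3:0 (2048 entries, assuming the
 * field encodes 512 << tsb_size). The TSB base is the value with the
 * low, size-dependent alignment bits masked off - 0x10000000 here -
 * and, since the assumed 64 KB page shift (16) is not above 22,
 * tag_match_shift stays 0.
 */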
781 | ||
782 | ||
783 | /* | |
784 | * We arrive here because: | |
785 | * 1) a malformed (unaligned) PC | |
786 | * 2) a TLB / icache miss | |
787 | * 3) an x-cache miss | |
788 | */ | |
789 | ||
790 | ||
791 | void ss_xic_miss(simcpu_t * sp, xicache_line_t * xc_linep, tvaddr_t pc) | |
792 | { | |
793 | tvaddr_t va, tag; | |
794 | tpaddr_t pa, pa_tag; | |
795 | config_addr_t * cap; | |
796 | tpaddr_t extent; | |
797 | uint8_t * bufp; | |
798 | sparcv9_cpu_t * v9p; | |
799 | ss_strand_t * nsp; | |
800 | ss_proc_t * npp; | |
801 | uint_t context, context_type, miss_context; | |
802 | error_conf_t * ep; | |
803 | error_t * errorp; | |
804 | bool_t search_tlb_again = false; | |
805 | ||
806 | v9p = (sparcv9_cpu_t *)(sp->specificp); | |
807 | nsp = v9p->impl_specificp; | |
808 | npp = sp->config_procp->procp; | |
809 | #if ERROR_INJECTION | |
810 | errorp = sp->errorp; | |
811 | #endif | |
812 | ||
813 | /* | |
814 | * determine context in terms of TL | |
815 | */ | |
816 | if (v9p->tl>0) { | |
817 | miss_context = context = SS_NUCLEUS_CONTEXT; | |
818 | context_type = ss_ctx_nucleus; | |
819 | } else { | |
820 | miss_context = context = nsp->pri_context; | |
821 | context_type = ss_ctx_primary; | |
822 | if (nsp->pri_context != nsp->pri_context1) | |
823 | search_tlb_again = true; | |
824 | } | |
825 | ||
826 | /* The PC always has bits 0 & 1 zero */ | |
827 | ASSERT((pc & 0x3) == 0); | |
828 | ||
829 | /* align the pc to the start of the XC line */ | |
830 | va = pc; | |
831 | tag = va & XICACHE_TAG_PURE_MASK; | |
832 | ||
833 | /* | |
834 | * Perform a virtual to physical translation | |
835 | * so we can determine if we are dealing with | |
836 | * a TLB miss or simply an x-cache miss. | |
837 | */ | |
838 | ||
839 | /* Find the pa corresponding to the line we need */ | |
840 | /* We assume that for SunSPARC, the TLB is off in Hyper priv mode */ | |
841 | /* FIXME: we should probably do this by swizzling a function pointer */ | |
842 | /* for this when we change mode, rather than having an if here ... fix later */ | |
843 | ||
844 | pa_tag = tag; | |
845 | ||
846 | if (v9p->pstate.addr_mask) { | |
847 | pc &= MASK64(31,0); | |
848 | pa_tag &= MASK64(31,0); | |
849 | va &= MASK64(31,0); | |
850 | /* NOTE: we don't mask tag ... we allow that to match the 64-bit address */ | |
851 | } | |
852 | ||
853 | pa = va; | |
854 | if (!nsp->mmu_bypass) { | |
855 | uint_t idx, partid; | |
856 | ss_tlb_t * tlbp; | |
857 | tlb_entry_t *tep, *tmp_tep; | |
858 | uint_t flags; | |
859 | uint64_t pa_offset; | |
860 | ss_trap_type_t miss_trap_type; | |
861 | ||
862 | /* If the MMU is disabled but we're in priv/user mode, use real addresses */ | |
863 | ||
864 | if (!nsp->immu.enabled) { | |
865 | context = SS_TLB_REAL_CONTEXT; | |
866 | search_tlb_again = false; | |
867 | ||
868 | } | |
869 | /* | |
870 | * check for an out-of-range address (i.e. whether it lies within | |
871 | * the "VA hole" or "RA hole") | |
872 | */ | |
873 | if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) { | |
874 | ss_trap_type_t tt; | |
875 | /* | |
876 | * setup the right trap type | |
877 | */ | |
878 | if (context == SS_TLB_REAL_CONTEXT) | |
879 | tt = N2_trap_instruction_real_range; | |
880 | else | |
881 | tt = N2_trap_instruction_address_range; | |
882 | SET_ITLB_FAULT( nsp, VA48(va) ); | |
883 | nsp->immu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | | |
884 | miss_context; | |
885 | DBGMMU( lprintf(sp->gid, "IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); ); | |
886 | MEMORY_ACCESS_TRAP(); | |
887 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)tt); | |
888 | return; | |
889 | } | |
890 | ||
891 | tlbp = nsp->itlbp; | |
892 | RW_rdlock(&tlbp->rwlock); | |
893 | ||
894 | partid = nsp->partid; | |
895 | ||
896 | n2_itlb_search:; | |
897 | /* FIXME: Need a better hash than this ! */ | |
898 | idx = va >> SS_MAX_PAGE_SIZE_BITS; | |
899 | idx += context + nsp->partid; | |
900 | ||
901 | idx &= SS_TLB_HASH_MASK; | |
902 | ||
903 | /* | |
904 | * So we search for a matching page using the info we have in the | |
905 | * hash - while another thread might possibly be removing or | |
906 | * inserting an entry into the same table. | |
907 | */ | |
908 | ||
909 | ||
910 | for ( tep = tlbp->hash[idx].ptr; tep!=(tlb_entry_t*)0; tep = tep->nextp ) { | |
911 | /* try and match the entry as appropriate */ | |
912 | if (((tep->tag_pfn ^ va)>>tep->match_shift)==0 && tep->match_context==context && tep->partid == partid) goto itlb_match; | |
913 | } | |
914 | ||
915 | /* | |
916 | * Might need to search the TLB one more time based | |
917 | * on the shared context value. | |
918 | */ | |
919 | if (search_tlb_again) { | |
920 | search_tlb_again = false; | |
921 | context = nsp->pri_context1; | |
922 | goto n2_itlb_search; | |
923 | } | |
924 | ||
925 | RW_unlock(&tlbp->rwlock); | |
926 | ||
927 | DBGMISS( lprintf(sp->gid, "itlb miss: pc=%lx va=%lx ctx=%x\n", pc, va, miss_context); ); | |
928 | /* | |
929 | * If the MMU is "disabled" in privileged mode ... this is a real miss, not a | |
930 | * virtual translation miss, so the fault context and trap type are different | |
931 | */ | |
932 | if (nsp->immu.enabled) { | |
933 | miss_trap_type = ss_hardware_tablewalk(sp, &(nsp->immu), tlbp, va, | |
934 | context_type, &flags, &pa_offset); | |
935 | if (miss_trap_type == SS_trap_NONE) { | |
936 | pa += pa_offset; | |
937 | pa_tag += pa_offset; | |
938 | goto itlb_priv_test; | |
939 | } | |
940 | } else { | |
941 | miss_context = 0; /* null for ra->pa miss undefined ? */ | |
942 | miss_trap_type = SS_trap_instruction_real_translation_miss; | |
943 | } | |
944 | itlb_trap:; | |
945 | VA48_WARNING(sp, va); | |
946 | SET_ITLB_FAULT( nsp, va ); | |
947 | nsp->immu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* FIXME: - do properly later */ | |
948 | DBGMMU( lprintf(sp->gid, "miss_trap_type=0x%x " | |
949 | "IMMU tag access = 0x%llx\n", | |
950 | miss_trap_type, nsp->immu.tag_access_reg); ); | |
951 | MEMORY_ACCESS_TRAP(); | |
952 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type); | |
953 | return; | |
954 | ||
955 | itlb_match:; | |
956 | ||
957 | /* | |
958 | * try and match the entry again for multi-hit | |
959 | */ | |
960 | for (tmp_tep = tep->nextp; tmp_tep != (tlb_entry_t*)0; tmp_tep = tmp_tep->nextp) { | |
961 | if (((tmp_tep->tag_pfn ^ va) >> tmp_tep->match_shift) == 0 | |
962 | && tmp_tep->match_context == context && tmp_tep->partid == partid) { | |
963 | ||
964 | RW_unlock(&tlbp->rwlock); | |
965 | ||
966 | DBGMMU( lprintf(sp->gid, "itlb miss multi-hit: pc=%lx va=%lx ctx=%x\n", | |
967 | pc, va, context); ); | |
968 | DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n", | |
969 | tep->tag_context, tep->match_shift, tep->tag_pfn, | |
970 | tep->tag_pfn + tep->pa_offset); ); | |
971 | DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n", | |
972 | tmp_tep->tag_context, tmp_tep->match_shift, | |
973 | tmp_tep->tag_pfn, tmp_tep->tag_pfn + tmp_tep->pa_offset); ); | |
974 | ||
975 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_instruction_access_MMU_error); | |
976 | return; | |
977 | } | |
978 | } | |
979 | ||
980 | flags = tep->flags; | |
981 | pa += tep->pa_offset; | |
982 | pa_tag += tep->pa_offset; | |
983 | ||
984 | RW_unlock(&tlbp->rwlock); | |
985 | ||
986 | itlb_priv_test:; | |
987 | ||
988 | #if ERROR_INJECTION | |
989 | /* | |
990 | * Errors on itlb hit: stash the tlb entry pointer and, if a | |
991 | * subsequent itlb hit matches the same entry, post the error again. | |
992 | */ | |
993 | if (itlb_hit_error_match(sp, tep)) | |
994 | return; | |
995 | #endif | |
996 | ||
997 | /* | |
998 | * privilege test | |
999 | */ | |
1000 | if ( (flags & SS_TLB_FLAG_PRIV) && v9p->state == V9_User) { | |
1001 | miss_trap_type = SS_trap_IAE_privilege_violation; | |
1002 | goto itlb_trap; | |
1003 | } | |
1004 | if (flags & SS_TLB_FLAG_NFO) { | |
1005 | miss_trap_type = SS_trap_IAE_NFO_page; | |
1006 | goto itlb_trap; | |
1007 | } | |
1008 | } else { | |
1009 | /* Niagara 2 only implements 40 bits of PA; the TLB code | |
1010 | masks the PA, so here we need to mask bypass PAs */ | |
1011 | pa &= MASK64(39,0); | |
1012 | } | |
1013 | ||
1014 | /* | |
1015 | * OK - now go get the instructions to fill in the xc-line | |
1016 | * ... start by finding the device that has the | |
1017 | * memory we need. | |
1018 | * optimise: by guessing at the last device found. | |
1019 | * | |
1020 | */ | |
1021 | ||
1022 | /* now find the device - looking in the cache first */ | |
1023 | ||
1024 | cap = sp->xic_miss_addrp; | |
1025 | ||
1026 | if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) { | |
1027 | domain_t * domainp; | |
1028 | config_proc_t * config_procp; | |
1029 | ||
1030 | config_procp = sp->config_procp; | |
1031 | domainp = config_procp->domainp; | |
1032 | ||
1033 | cap = find_domain_address(domainp, pa); | |
1034 | if (cap == NULL) { | |
1035 | /* OK, it's a bus error - there was no backing store */ | |
1036 | ||
1037 | fatal("bus error - instruction fetch from pc=0x%llx " | |
1038 | "(cacheline va=0x%llx -> physical 0x%llx)", pc, va, pa); /* FIXME */ | |
1039 | } | |
1040 | ||
1041 | sp->xic_miss_addrp = cap; /* cache for next time */ | |
1042 | } | |
1043 | ||
1044 | /* try and get the buffer pointer */ | |
1045 | ||
1046 | extent = cap->config_devp->dev_typep->dev_cacheable(cap, DA_Instn, pa_tag-cap->baseaddr, &bufp); | |
1047 | ||
1048 | if (extent < XICACHE_LINE_SIZE) { | |
1049 | /* bus error again ? or fill from multiple devices ? */ | |
1050 | fatal("fix bus error 2"); | |
1051 | /* FIXME */ | |
1052 | } | |
1053 | ||
1054 | #if ERROR_INJECTION | |
1055 | /* | |
1056 | * Errors on ifetch to icache or L2 cache | |
1057 | * Make sure the L2 cache is enabled | |
1058 | */ | |
1059 | xicache_error_match(sp, pa); | |
1060 | ||
1061 | #endif | |
1062 | ||
1063 | xc_linep->tag = tag | sp->tagstate; | |
1064 | xc_linep->memoryoffset = ((uint64_t)bufp)-tag; | |
1065 | ||
1066 | /* | |
1067 | * FIXME: If breakpoints are in use make sure we really clear the decoded line | |
1068 | * to ensure that we don't get instruction aliasing. XI-cache prob. needs a re-design | |
1069 | * from this standpoint - but this will wait until we complete the JIT version. | |
1070 | * Until then this is a reminder and a place holder. | |
1071 | */ | |
1072 | if (sp->bp_infop) xicache_clobber_line_decodes(sp, tag); | |
1073 | #if 0 /* { */ | |
1074 | xicache_line_fill_risc4(sp, xc_linep, tag, bufp); | |
1075 | #endif /* } */ | |
1076 | } | |
1077 | ||
1078 | static char valid_asi_map[256] = { | |
1079 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | |
1080 | 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, /* 0? */ | |
1081 | 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, /* 1? */ | |
1082 | 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, /* 2? */ | |
1083 | 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, /* 3? */ | |
1084 | 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, /* 4? */ | |
1085 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 5? */ | |
1086 | 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* 6? */ | |
1087 | 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7? */ | |
1088 | 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /* 8? */ | |
1089 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9? */ | |
1090 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a? */ | |
1091 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b? */ | |
1092 | 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, /* c? */ | |
1093 | 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /* d? */ | |
1094 | 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, /* e? */ | |
1095 | 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, /* f? */ | |
1096 | }; | |
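/*
 * The table is indexed directly by the 8-bit ASI number: the high
 * nibble selects the row and the low nibble the column. For example,
 * ASI 0x14 (SS_ASI_REAL_MEM) hits row "1?", column 4, which is marked
 * valid, whereas ASI 0x09 hits row "0?", column 9, which is not and
 * therefore takes the SS_trap_DAE_invalid_ASI path below.
 */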
1097 | ||
1098 | /* | |
1099 | * This is not the world's most efficient routine, but then we assume that ASIs are | |
1100 | * not frequently occurring memory access types - we may have to fast path the | |
1101 | * ASI_AS_IF_USER_PRIMARY etc. somehow if used frequently by kernel b-copy. | |
1102 | */ | |
1103 | ||
1104 | ||
1105 | void | |
1106 | ss_asi_access(simcpu_t * sp, maccess_t op, uint_t regnum, uint_t asi, | |
1107 | uint64_t reg1, uint64_t reg2, asi_flag_t asi_flag) | |
1108 | { | |
1109 | sparcv9_cpu_t * v9p; | |
1110 | ss_strand_t * nsp; | |
1111 | ss_proc_t *npp; | |
1112 | uint64_t val; | |
1113 | ss_tsb_info_t * tsbinfop, * tsbinfop1; | |
1114 | ss_mmu_t * mmup; | |
1115 | ss_tlb_t * tlbp; | |
1116 | bool_t is_load; | |
1117 | uint_t size, mask; | |
1118 | uint_t context_type, idx; | |
1119 | tvaddr_t addr; | |
1120 | mem_flags_t mflags; | |
1121 | bool_t is_real; | |
1122 | sparcv9_trap_type_t tt; | |
1123 | error_conf_t * ep; | |
1124 | uint_t core_num; | |
1125 | ||
1126 | v9p = (sparcv9_cpu_t *)(sp->specificp); | |
1127 | nsp = v9p->impl_specificp; | |
1128 | npp = (ss_proc_t *)(sp->config_procp->procp); | |
1129 | ||
1130 | ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]); | |
1131 | ||
1132 | if (asi == V9_ASI_IMPLICIT) | |
1133 | goto no_asi_valid_checks; | |
1134 | ||
1135 | /* | |
1136 | * First check if this is a legitimate ASI based | |
1137 | * on current privilege level. | |
1138 | */ | |
1139 | ||
1140 | /* | |
1141 | * Niagara 2 prioritizes invalid ASI over privileged action. | |
1142 | */ | |
1143 | if (!valid_asi_map[asi]) { | |
1144 | v9p->post_precise_trap(sp, | |
1145 | (sparcv9_trap_type_t)SS_trap_DAE_invalid_ASI); | |
1146 | return; | |
1147 | } | |
1148 | ||
1149 | switch( v9p->state ) { | |
1150 | case V9_User: | |
1151 | ASSERT( !v9p->pstate.priv && !v9p->hpstate.hpriv ); | |
1152 | if (asi<0x80) { | |
1153 | v9p->post_precise_trap(sp, Sparcv9_trap_privileged_action); | |
1154 | return; | |
1155 | } | |
1156 | break; | |
1157 | case V9_Priv: | |
1158 | ASSERT( v9p->pstate.priv && !v9p->hpstate.hpriv ); | |
1159 | if (asi>=0x30 && asi<0x80) { | |
1160 | /* ASIs reserved for hpriv mode appear to priv mode as data access exceptions */ | |
1161 | MEMORY_ACCESS_TRAP(); | |
1162 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_privileged_action); | |
1163 | return; | |
1164 | } | |
1165 | break; | |
1166 | case V9_HyperPriv: | |
1167 | ASSERT( v9p->hpstate.hpriv ); | |
1168 | break; | |
1169 | case V9_RED: | |
1170 | ASSERT( v9p->hpstate.red ); | |
1171 | break; | |
1172 | default: | |
1173 | abort(); | |
1174 | } | |
1175 | ||
1176 | no_asi_valid_checks:; | |
1177 | ||
1178 | /* | |
1179 | * Next pull out all the memory access ASIs ... | |
1180 | */ | |
1181 | ||
1182 | mflags = (V9_User != v9p->state) ? MF_Has_Priv : 0; | |
1183 | context_type = ss_ctx_reserved; | |
1184 | mask = (1<<(op & MA_Size_Mask))-1; | |
1185 | ||
1186 | switch(asi) { | |
1187 | case V9_ASI_IMPLICIT: | |
1188 | if (v9p->tl > 0) { | |
1189 | asi = (v9p->pstate.cle) ? SS_ASI_NUCLEUS_LITTLE : SS_ASI_NUCLEUS; | |
1190 | goto ss_asi_nucleus; | |
1191 | } | |
1192 | asi = (v9p->pstate.cle) ? SS_ASI_PRIMARY_LITTLE : SS_ASI_PRIMARY; | |
1193 | goto ss_asi_primary; | |
1194 | ||
1195 | case SS_ASI_NUCLEUS_LITTLE: | |
1196 | case SS_ASI_NUCLEUS: | |
1197 | ss_asi_nucleus:; | |
1198 | asi_nuc:; | |
1199 | context_type = ss_ctx_nucleus; | |
1200 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1201 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; | |
1202 | goto memory_access; | |
1203 | ||
1204 | case SS_ASI_PRIMARY_NO_FAULT_LITTLE: | |
1205 | case SS_ASI_PRIMARY_NO_FAULT: | |
1206 | if (IS_V9_MA_STORE(op & MA_Op_Mask)) | |
1207 | goto data_access_exception; | |
1208 | mflags |= MF_No_Fault; | |
1209 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1210 | goto asi_prim; | |
1211 | ||
1212 | case SS_ASI_AS_IF_USER_PRIMARY_LITTLE: | |
1213 | case SS_ASI_AS_IF_USER_PRIMARY: | |
1214 | mflags &= ~MF_Has_Priv; | |
1215 | goto asi_prim; | |
1216 | ||
1217 | case SS_ASI_PRIMARY_LITTLE: /* (88) RW Implicit Primary Address space (LE) */ | |
1218 | case SS_ASI_PRIMARY: /* (80) RW Implicit Primary Address space */ | |
1219 | ss_asi_primary:; | |
1220 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1221 | asi_prim:; | |
1222 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; | |
1223 | context_type = ss_ctx_primary; | |
1224 | goto memory_access; | |
1225 | ||
1226 | case SS_ASI_SECONDARY_NO_FAULT_LITTLE: | |
1227 | case SS_ASI_SECONDARY_NO_FAULT: | |
1228 | if (IS_V9_MA_STORE(op & MA_Op_Mask)) | |
1229 | goto data_access_exception; | |
1230 | mflags |= MF_No_Fault; | |
1231 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1232 | goto asi_sec; | |
1233 | ||
1234 | case SS_ASI_AS_IF_USER_SECONDARY_LITTLE: | |
1235 | case SS_ASI_AS_IF_USER_SECONDARY: | |
1236 | mflags &= ~MF_Has_Priv; | |
1237 | goto asi_sec; | |
1238 | ||
1239 | case SS_ASI_SECONDARY_LITTLE: | |
1240 | case SS_ASI_SECONDARY: | |
1241 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1242 | asi_sec:; | |
1243 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; | |
1244 | context_type = ss_ctx_secondary; | |
1245 | goto memory_access; | |
1246 | ||
1247 | case SS_ASI_REAL_IO_LITTLE: /* (1D) RW Same as ASI_PHYS_USE_EC_LITTLE for memory | |
1248 | addresses. For IO addresses, physical address, | |
1249 | non-cacheable, with side-effect (LE) */ | |
1250 | case SS_ASI_REAL_IO: /* (15) RW Same as ASI_PHYS_USE_EC for memory addresses. | |
1251 | For IO addresses, physical address, non-cacheable, | |
1252 | with side-effect */ | |
1253 | mflags |= MF_IO_Access; | |
1254 | mflags |= MF_TLB_Real_Ctx; | |
1255 | context_type = ss_ctx_nucleus; | |
1256 | goto memory_access; | |
1257 | ||
1258 | case SS_ASI_REAL_MEM_LITTLE: /* (1C) RW physical address, non-allocating in L1 cache */ | |
1259 | case SS_ASI_REAL_MEM: /* (14) RW physical address, non-allocating in L1 cache */ | |
1260 | mflags |= MF_TLB_Real_Ctx; | |
1261 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; | |
1262 | context_type = ss_ctx_nucleus; | |
1263 | goto memory_access; | |
1264 | ||
1265 | case SS_ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE: /* RW 64B block load/store, primary address space, user privilege (LE) */ | |
1266 | case SS_ASI_BLOCK_AS_IF_USER_PRIMARY: /* RW 64B block load/store, primary address space, user privilege */ | |
1267 | mflags &= ~MF_Has_Priv; | |
1268 | goto asi_blk_prim; | |
1269 | ||
1270 | case SS_ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE: /* RW 64B block load/store, secondary address space, user privilege (LE) */ | |
1271 | case SS_ASI_BLOCK_AS_IF_USER_SECONDARY: /* RW 64B block load/store, secondary address space, user privilege */ | |
1272 | mflags &= ~MF_Has_Priv; | |
1273 | goto asi_blk_sec; | |
1274 | ||
1275 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address, user priv (LE) */ | |
1276 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P: /* Block initializing store/128b atomic LDDA, primary address, user privilege */ | |
1277 | mflags &= ~MF_Has_Priv; | |
1278 | goto blk_init_prim; | |
1279 | ||
1280 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store, secondary address, user privilege (LE) */ | |
1281 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S: /* Block initializing store/128b atomic LDDA, secondary address, user privilege */ | |
1282 | mflags &= ~MF_Has_Priv; | |
1283 | goto blk_init_sec; | |
1284 | ||
1285 | case SS_ASI_QUAD_LDD_LITTLE: /* 128b atomic LDDA (LE) */ | |
1286 | case SS_ASI_QUAD_LDD: /* 128b atomic LDDA */ | |
1287 | /* This ASI must be used with an LDDA instruction */ | |
1288 | if (MA_lddu64 != op) { | |
1289 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1290 | return; | |
1291 | } | |
1292 | /* Adjust size to 128 bits (16 bytes) so alignment is correct */ | |
1293 | op = MA_lddu128; | |
1294 | mask = (1<<(op & MA_Size_Mask))-1; | |
1295 | mflags |= MF_Atomic_Access; | |
1296 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1297 | context_type = ss_ctx_nucleus; | |
1298 | goto memory_access; | |
1299 | ||
1300 | case SS_ASI_QUAD_LDD_REAL_LITTLE: /* 128b atomic LDDA, real address (LE) */ | |
1301 | case SS_ASI_QUAD_LDD_REAL: /* 128b atomic LDDA, real address */ | |
1302 | /* This ASI must be used with an LDDA instruction */ | |
1303 | if (MA_lddu64 != op) { | |
1304 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1305 | return; | |
1306 | } | |
1307 | /* Adjust size to 128 bits (16 bytes) so alignment is correct */ | |
1308 | op = MA_lddu128; | |
1309 | mask = (1<<(op & MA_Size_Mask))-1; | |
1310 | mflags |= MF_Atomic_Access; | |
1311 | mflags |= MF_TLB_Real_Ctx; | |
1312 | context_type = ss_ctx_nucleus; | |
1313 | goto memory_access; | |
1314 | ||
1315 | case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD_LITTLE: /* Block initializing store/128b atomic LDDA (LE) */ | |
1316 | case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD: /* Block initializing store/128b atomic LDDA */ | |
1317 | if (MA_lddu64 == op) { | |
1318 | op = MA_lddu128; | |
1319 | mask = (1<<(op & MA_Size_Mask))-1; | |
1320 | mflags |= MF_Atomic_Access; | |
1321 | goto asi_nuc; | |
1322 | } else | |
1323 | if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) { | |
1324 | /* block init effect */ | |
1325 | addr = ((op & MA_Op_Mask) == MA_CAS) ? | |
1326 | reg1 : (reg1 + reg2); | |
1327 | if ((addr & 0x3f) == 0) | |
1328 | mflags |= MF_Blk_Init; | |
1329 | goto asi_nuc; | |
1330 | ||
1331 | } | |
1332 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1333 | return; | |
1334 | ||
1335 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address (LE) */ | |
1336 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_P: /* Block initializing store/128b atomic LDDA, primary address */ | |
1337 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1338 | blk_init_prim:; | |
1339 | if (MA_lddu64 == op) { | |
1340 | op = MA_lddu128; | |
1341 | mask = (1<<(op & MA_Size_Mask))-1; | |
1342 | mflags |= MF_Atomic_Access; | |
1343 | goto asi_prim; | |
1344 | } else | |
1345 | if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) { | |
1346 | /* block init effect */ | |
1347 | addr = ((op & MA_Op_Mask) == MA_CAS) ? | |
1348 | reg1 : (reg1 + reg2); | |
1349 | if ((addr & 0x3f) == 0) | |
1350 | mflags |= MF_Blk_Init; | |
1351 | goto asi_prim; | |
1352 | ||
1353 | } | |
1354 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1355 | return; | |
1356 | ||
1357 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store/128b atomic LDDA, secondary address (LE) */ | |
1358 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_S: /* Block initializing store/128b atomic LDDA, secondary address */ | |
1359 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1360 | blk_init_sec:; | |
1361 | if (MA_lddu64 == op) { | |
1362 | op = MA_lddu128; | |
1363 | mask = (1<<(op & MA_Size_Mask))-1; | |
1364 | mflags |= MF_Atomic_Access; | |
1365 | goto asi_sec; | |
1366 | } else | |
1367 | if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) { | |
1368 | /* block init effect */ | |
1369 | addr = ((op & MA_Op_Mask) == MA_CAS) ? | |
1370 | reg1 : (reg1 + reg2); | |
1371 | if ((addr & 0x3f) == 0) | |
1372 | mflags |= MF_Blk_Init; | |
1373 | goto asi_sec; | |
1374 | ||
1375 | } | |
1376 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1377 | return; | |
1378 | ||
1379 | case SS_ASI_BLK_PL: /* 64B block load/store, primary address (LE) */ | |
1380 | case SS_ASI_BLK_COMMIT_P: /* Same as SS_ASI_BLK_P on N2 (no commit) */ | |
1381 | case SS_ASI_BLK_P: /* 64B block load/store, primary address */ | |
1382 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1383 | asi_blk_prim:; | |
1384 | /* This ASI must be used with an LDDFA/STDFA instruction */ | |
1385 | if (!(MA_ldfp64 == op || MA_stfp64 == op) || | |
1386 | ((regnum & 0xf) != 0)) { | |
1387 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1388 | return; | |
1389 | } | |
1390 | op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) : | |
1391 | (MA_Size512 | MA_StFloat); | |
1392 | mask = (1<<(op & MA_Size_Mask))-1; | |
1393 | mflags |= MF_Atomic_Access; | |
1394 | goto asi_prim; | |
1395 | ||
1396 | case SS_ASI_BLK_SL: /* 64B block load/store, secondary address (LE) */ | |
1397 | case SS_ASI_BLK_COMMIT_S: /* Same as SS_ASI_BLK_S on N2 (no commit) */ | |
1398 | case SS_ASI_BLK_S: /* 64B block load/store, secondary address */ | |
1399 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1400 | asi_blk_sec:; | |
1401 | /* This ASI must be used with an LDDFA/STDFA instruction */ | |
1402 | if (!(MA_ldfp64 == op || MA_stfp64 == op) || | |
1403 | ((regnum & 0xf) != 0)) { | |
1404 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1405 | return; | |
1406 | } | |
1407 | op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) : | |
1408 | (MA_Size512 | MA_StFloat); | |
1409 | mask = (1<<(op & MA_Size_Mask))-1; | |
1410 | mflags |= MF_Atomic_Access; | |
1411 | goto asi_sec; | |
1412 | ||
1413 | case SS_ASI_PST8_PL: | |
1414 | case SS_ASI_PST8_P: | |
1415 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1416 | /* This ASI must be used with STDFA instruction */ | |
1417 | if (!(MA_stfp64 == op)) { | |
1418 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1419 | return; | |
1420 | } | |
1421 | goto asi_prim; | |
1422 | ||
1423 | case SS_ASI_PST8_SL: | |
1424 | case SS_ASI_PST8_S: | |
1425 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1426 | /* This ASI must be used with STDFA instruction */ | |
1427 | if (!(MA_stfp64 == op)) { | |
1428 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1429 | return; | |
1430 | } | |
1431 | goto asi_sec; | |
1432 | ||
1433 | case SS_ASI_PST16_PL: | |
1434 | case SS_ASI_PST16_P: | |
1435 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1436 | /* This ASI must be used with STDFA instruction */ | |
1437 | if (!(MA_stfp64 == op)) { | |
1438 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1439 | return; | |
1440 | } | |
1441 | goto asi_prim; | |
1442 | ||
1443 | case SS_ASI_PST16_SL: | |
1444 | case SS_ASI_PST16_S: | |
1445 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1446 | /* This ASI must be used with STDFA instruction */ | |
1447 | if (!(MA_stfp64 == op)) { | |
1448 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1449 | return; | |
1450 | } | |
1451 | goto asi_sec; | |
1452 | ||
1453 | case SS_ASI_PST32_PL: | |
1454 | case SS_ASI_PST32_P: | |
1455 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1456 | /* This ASI must be used with STDFA instruction */ | |
1457 | if (!(MA_stfp64 == op)) { | |
1458 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1459 | return; | |
1460 | } | |
1461 | goto asi_prim; | |
1462 | ||
1463 | case SS_ASI_PST32_SL: | |
1464 | case SS_ASI_PST32_S: | |
1465 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1466 | /* This ASI must be used with STDFA instruction */ | |
1467 | if (!(MA_stfp64 == op)) { | |
1468 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1469 | return; | |
1470 | } | |
1471 | goto asi_sec; | |
1472 | ||
1473 | case SS_ASI_FL8_PL: | |
1474 | case SS_ASI_FL8_P: | |
1475 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1476 | /* This ASI must be used with an LDDFA/STDFA instruction */ | |
1477 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { | |
1478 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1479 | return; | |
1480 | } | |
1481 | op = (MA_ldfp64 == op) ? MA_ldfp8: MA_stfp8; | |
1482 | mask = (1<<(op & MA_Size_Mask))-1; | |
1483 | goto asi_prim; | |
1484 | ||
1485 | case SS_ASI_FL8_SL: | |
1486 | case SS_ASI_FL8_S: | |
1487 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1488 | /* This ASI must be used with an LDDFA/STDFA instruction */ | |
1489 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { | |
1490 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1491 | return; | |
1492 | } | |
1493 | op = (MA_ldfp64 == op) ? MA_ldfp8: MA_stfp8; | |
1494 | mask = (1<<(op & MA_Size_Mask))-1; | |
1495 | goto asi_sec; | |
1496 | ||
1497 | case SS_ASI_FL16_PL: | |
1498 | case SS_ASI_FL16_P: | |
1499 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1500 | /* This ASI must be used with an LDDFA/STDFA instruction */ | |
1501 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { | |
1502 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1503 | return; | |
1504 | } | |
1505 | op = (MA_ldfp64 == op) ? MA_ldfp16: MA_stfp16; | |
1506 | mask = (1<<(op & MA_Size_Mask))-1; | |
1507 | goto asi_prim; | |
1508 | ||
1509 | case SS_ASI_FL16_SL: | |
1510 | case SS_ASI_FL16_S: | |
1511 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; | |
1512 | /* This ASI must be used with an LDDFA/STDFA instruction */ | |
1513 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { | |
1514 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
1515 | return; | |
1516 | } | |
1517 | op = (MA_ldfp64 == op) ? MA_ldfp16: MA_stfp16; | |
1518 | mask = (1<<(op & MA_Size_Mask))-1; | |
1519 | goto asi_sec; | |
1520 | ||
1521 | memory_access:; | |
1522 | if ((MA_LdFloat == (op & MA_Op_Mask)) || (MA_StFloat == (op & MA_Op_Mask)) ) { | |
1523 | ss_memory_asi_access(sp, op, (uint64_t *)&(sp->fpreg.s32[regnum]), mflags, asi, context_type, mask, reg1, reg2); | |
1524 | } else { | |
1525 | ss_memory_asi_access(sp, op, &(sp->intreg[regnum]), mflags, asi, context_type, mask, reg1, reg2); | |
1526 | ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]); | |
1527 | } | |
1528 | return; | |
1529 | ||
1530 | default: | |
1531 | break; | |
1532 | } | |
1533 | ||
1534 | ||
1535 | ||
1536 | /* OK, derive access address etc. */ | |
1537 | ||
1538 | size = op & MA_Size_Mask; | |
1539 | op &= MA_Op_Mask; | |
1540 | is_load = IS_V9_MA_LOAD(op); | |
1541 | ||
1542 | /* No MA_CAS case required for cpu state registers. */ | |
1543 | addr = reg1 + reg2; | |
1544 | ||
1545 | ||
1546 | /* | |
1547 | * Finally all the cpu state registers ... | |
1548 | * Currently only 64bit accesses supported .. | |
1549 | * need to ascertain exactly what niagara does here ! FIXME | |
1550 | * FIXME: Of course all the alt address space accesses are different here ! | |
1551 | */ | |
1552 | ||
1553 | if (size != MA_Size64 || (addr&0x7)!=0 || IS_V9_MA_ATOMIC(op)) | |
1554 | goto data_access_exception; | |
1555 | ||
1556 | ASSERT(MA_LdSigned != op); /* not signed for any stxas or for ldxas */ | |
1557 | ||
1558 | #define ITODO(s) do { \ | |
1559 | IMPL_WARNING(("Unimplemented niagara2 asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc)); \ | |
1560 | if (is_load) { val = 0; goto load_complete; }\ | |
1561 | } while (0) | |
1562 | ||
1563 | #if ERROR_TRAP_GEN /* { */ | |
1564 | #define TODO(s) ITODO(s) | |
1565 | #else /* } ERROR_TRAP_GEN { */ | |
1566 | #define TODO(s) fatal("Unimplemented niagara2 asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc) | |
1567 | #endif /* } ERROR_TRAP_GEN { */ | |
1568 | ||
1569 | /* If we're storing, fetch the value to be stored */ | |
1570 | if (!is_load) { | |
1571 | if (op == MA_St) { | |
1572 | val = sp->intreg[regnum]; | |
1573 | } else { /* MA_StFloat */ | |
1574 | switch(size) { | |
1575 | case MA_Size32: | |
1576 | val = sp->fpreg.s32[regnum]; | |
1577 | break; | |
1578 | case MA_Size64: | |
1579 | val = sp->fpreg.s64[regnum >> 1]; | |
1580 | break; | |
1581 | default: | |
1582 | goto unimplemented; | |
1583 | } | |
1584 | } | |
1585 | }; | |
1586 | /* Hex Access VA Repli- DESCRIPTION */ | |
1587 | /* (hex) cated */ | |
1588 | switch(asi) { | |
1589 | ||
1590 | /* MANDATORY SPARC V9 ASIs */ | |
1591 | ||
1592 | /* All in the memory section above */ | |
1593 | ||
1594 | /* SunSPARC EXTENDED (non-V9) ASIs */ | |
1595 | ||
1596 | case SS_ASI_SCRATCHPAD: | |
1597 | /* | |
1598 | * 0x20 RW 0-18 Y Scratchpad Registers | |
1599 | * 0x20 - 20-28 N any type of access causes data_access_exception | |
1600 | * 0x20 RW 30-38 Y Scratchpad Registers | |
1601 | */ | |
1602 | ||
1603 | if (INVALID_SCRATCHPAD(addr)) { | |
1604 | goto data_access_exception; | |
1605 | } else { | |
1606 | uint64_t * valp = | |
1607 | &(nsp->strand_reg[SSR_ScratchPad0 + (addr>>3)]); | |
1608 | if (is_load) { | |
1609 | val = *valp; | |
1610 | goto load_complete; | |
1611 | } | |
1612 | DBGSCRATCH( if (*valp != val) | |
1613 | lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: " | |
1614 | "0x%llx -> 0x%llx pc=0x%llx\n", | |
1615 | asi, addr, *valp, val, sp->pc); ); | |
1616 | *valp = val; | |
1617 | } | |
1618 | break; | |
1619 | ||
1620 | case SS_ASI_MMU: | |
1621 | /* Niagara 1: | |
1622 | * 0x21 RW 8 Y I/DMMU Primary Context Register | |
1623 | * 0x21 RW 10 Y DMMU Secondary Context Register | |
1624 | * 0x21 RW 120 Y I/DMMU Synchronous Fault Pointer | |
1625 | * Niagara 2: | |
1626 | * 0x21 RW 108 Y I/DMMU Primary Context Register 1 | |
1627 | * 0x21 RW 110 Y DMMU Secondary Context Register 1 | |
1628 | */ | |
1629 | if (is_load) { | |
1630 | switch(addr) { | |
1631 | case 0x08: | |
1632 | val = (uint64_t)(nsp->pri_context); | |
1633 | goto load_complete; | |
1634 | case 0x10: | |
1635 | val = (uint64_t)(nsp->sec_context); | |
1636 | goto load_complete; | |
1637 | case 0x108: | |
1638 | val = (uint64_t)(nsp->pri_context1); | |
1639 | goto load_complete; | |
1640 | case 0x110: | |
1641 | val = (uint64_t)(nsp->sec_context1); | |
1642 | goto load_complete; | |
1643 | default: | |
1644 | break; | |
1645 | } | |
1646 | goto data_access_exception; | |
1647 | } else { | |
1648 | /* | |
1649 | * Since we're changing a context register we should | |
1650 | * flush the xi and xd trans caches. However, this only matters | |
1651 | * for the primary context - iff we are in priv mode with | |
1652 | * TL=0. For all other cases (TL>0) or hpriv=1, either the | |
1653 | * MMU is not in use, or we're executing the nucleus context so | |
1654 | * we can rely on a done/retry instn / mode change to do the flush for us | |
1655 | * when we change mode later. | |
1656 | */ | |
1657 | DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); | |
1658 | switch(addr) { | |
1659 | case 0x08: | |
1660 | val &= MASK64(12,0); | |
1661 | if (nsp->pri_context!=val || | |
1662 | nsp->pri_context1!=val) { | |
1663 | sp->xicache_trans_flush_pending = true; | |
1664 | sp->xdcache_trans_flush_pending = true; | |
1665 | xcache_set_tagstate(sp); | |
1666 | } | |
1667 | nsp->pri_context = val; | |
1668 | /* | |
1669 | * update the corresponding second context register for | |
1670 | * backwards compability | |
1671 | */ | |
1672 | nsp->pri_context1 = val; | |
1673 | break; | |
1674 | case 0x10: | |
1675 | nsp->sec_context = val & MASK64(12,0); | |
1676 | /* | |
1677 | * update the corresponding second context register for | |
1678 | * backwards compatibility | |
1679 | */ | |
1680 | nsp->sec_context1 = val & MASK64(12,0); | |
1681 | break; | |
1682 | case 0x108: | |
1683 | val &= MASK64(12,0); | |
1684 | if (nsp->pri_context1!=val) { | |
1685 | sp->xicache_trans_flush_pending = true; | |
1686 | sp->xdcache_trans_flush_pending = true; | |
1687 | xcache_set_tagstate(sp); | |
1688 | } | |
1689 | nsp->pri_context1 = val; | |
1690 | break; | |
1691 | case 0x110: | |
1692 | nsp->sec_context1 = val & MASK64(12,0); | |
1693 | break; | |
1694 | default: | |
1695 | goto data_access_exception; | |
1696 | } | |
1697 | } | |
1698 | break; | |
1699 | ||
1700 | case SS_ASI_QUEUE: /* 0x25 RW 3C0 Y CPU Mondo Queue Head Pointer */ | |
1701 | /* 0x25 RW 3C8 Y CPU Mondo Queue Tail Pointer */ | |
1702 | /* 0x25 RW 3D0 Y Device Mondo Queue Head Pointer */ | |
1703 | /* 0x25 RW 3D8 Y Device Mondo Queue Tail Pointer */ | |
1704 | /* 0x25 RW 3E0 Y Resumable Error Queue Head Pointer */ | |
1705 | /* 0x25 RW 3E8 Y Resumable Error Queue Tail Pointer */ | |
1706 | /* 0x25 RW 3F0 Y Nonresumable Error Queue Head Pointer */ | |
1707 | /* 0x25 RW 3F8 Y Nonresumable Error Queue Tail Pointer */ | |
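| /* addresses 0x3c0-0x3f8 map to queue index (addr>>4) - 0x3c: 0 = cpu mondo, 1 = dev mondo, 2 = resumable error, 3 = nonresumable error */ | |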
1708 | if (is_load) { | |
1709 | ||
1710 | switch(addr) { | |
1711 | case 0x3c0: | |
1712 | case 0x3d0: | |
1713 | case 0x3e0: | |
1714 | case 0x3f0: | |
1715 | val = (uint16_t)(nsp->nqueue[ (addr>>4) - 0x3c].head); | |
1716 | goto load_complete; | |
1717 | case 0x3c8: | |
1718 | case 0x3d8: | |
1719 | case 0x3e8: | |
1720 | case 0x3f8: | |
1721 | val = (uint16_t)(nsp->nqueue[(addr>>4) - 0x3c].tail); | |
1722 | goto load_complete; | |
1723 | default: | |
1724 | goto data_access_exception; | |
1725 | } | |
1726 | } else { | |
1727 | DBGMONDO( lprintf(sp->gid, "ASI_QUEUE store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); | |
1728 | ||
1729 | RSVD_MASK(sp, MASK64(17, 6), val, asi, addr); | |
1730 | switch(addr) { | |
1731 | case 0x3c0: | |
1732 | case 0x3d0: | |
1733 | case 0x3e0: | |
1734 | case 0x3f0: | |
1735 | nsp->nqueue[(addr>>4) - 0x3c].head = (uint16_t)val; | |
1736 | nsp->flag_queue_irq[(addr>>4)- 0x3c] = nsp->nqueue[(addr>>4) - 0x3c].head != nsp->nqueue[(addr>>4) - 0x3c].tail; | |
1737 | break; | |
1738 | case 0x3c8: | |
1739 | case 0x3d8: | |
1740 | case 0x3e8: | |
1741 | case 0x3f8: | |
1742 | if (v9p->state != V9_HyperPriv && | |
1743 | v9p->state != V9_RED) | |
1744 | goto data_access_exception; /* DAX if store to tail in privileged mode */ | |
1745 | nsp->nqueue[(addr>>4) - 0x3c].tail = (uint16_t)val; | |
1746 | nsp->flag_queue_irq[(addr>>4)- 0x3c] = nsp->nqueue[(addr>>4) - 0x3c].head != nsp->nqueue[(addr>>4) - 0x3c].tail; | |
1747 | break; | |
1748 | default: | |
1749 | goto data_access_exception; | |
1750 | } | |
1751 | ss_check_interrupts(sp); | |
1752 | } | |
1753 | break; | |
1754 | ||
1755 | #if INTERNAL_BUILD /* { */ | |
1756 | case SS_ASI_STREAM_MA: | |
1757 | /* 0x40 RW 0 N CWQ Head */ | |
1758 | /* 0x40 RW 8 N CWQ Tail */ | |
1759 | /* 0x40 RW 10 N CWQ First */ | |
1760 | /* 0x40 RW 18 N CWQ Last */ | |
1761 | /* 0x40 RW 20 N CWQ CSR */ | |
1762 | /* 0x40 RW 28 N CWQ CSR_ENABLE */ | |
1763 | /* 0x40 RW 30 N CWQ SYNC */ | |
1764 | /* 0x40 RW 80 N MA CONTROL */ | |
1765 | /* 0x40 RW 88 N MA PHYS ADDR */ | |
1766 | /* 0x40 RW 90 N MA ADDR (8 elements) */ | |
1767 | /* 0x40 RW 98 N MA NP REG */ | |
1768 | /* 0x40 RW a0 N MA SYNC */ | |
1769 | ||
1770 | if ((op & MA_Op_Mask) == MA_St) { | |
1771 | ||
1772 | uint_t rv; | |
1773 | ||
1774 | #ifdef LOG_ASI_STREAM_MA_ACCESSES | |
1775 | EXEC_WARNING(("Got a store to the MA_STREAM ASI, " | |
1776 | "addr=0x%llx, value=0x%llx\n", addr, val)); | |
1777 | #endif | |
1778 | ||
1779 | if (size != MA_Size64) { | |
1780 | EXEC_WARNING(("Store MA size not 64 bits: " | |
1781 | "pc=%p ASI: %x addr: %p, " | |
1782 | "mod_arith \n", sp->pc, asi, addr)); | |
1783 | goto data_access_exception; | |
1784 | } | |
1785 | ||
1786 | switch(addr) { | |
1787 | case 0x0: | |
1788 | rv = set_CWQ_head_reg(sp, val); | |
1789 | break; | |
1790 | case 0x8: | |
1791 | rv = set_CWQ_tail_reg(sp, val); | |
1792 | break; | |
1793 | case 0x10: | |
1794 | rv = set_CWQ_first_reg(sp, val); | |
1795 | break; | |
1796 | case 0x18: | |
1797 | rv = set_CWQ_last_reg(sp, val); | |
1798 | break; | |
1799 | case 0x20: | |
1800 | /* set HWE, PrE, and enable bits */ | |
1801 | rv = set_CWQ_CSR_reg(sp, val, 0xdULL); | |
1802 | break; | |
1803 | case 0x28: | |
1804 | /* set only enable bit */ | |
1805 | rv = set_CWQ_CSR_reg(sp, val, 0x1ULL); | |
1806 | break; | |
1807 | /* 30 is CWQ_sync_reg, illegal */ | |
1808 | case 0x80: | |
1809 | rv = set_MA_ctl_reg(sp, val); | |
1810 | break; | |
1811 | case 0x88: | |
1812 | rv = set_MA_physad_reg(sp, val); | |
1813 | break; | |
1814 | case 0x90: | |
1815 | rv = set_MA_memad_reg(sp, val); | |
1816 | break; | |
1817 | case 0x98: | |
1818 | rv = set_MA_nprime_reg(sp, val); | |
1819 | break; | |
1820 | /* a0 is MA SYNC REG, illegal */ | |
1821 | default: | |
1822 | EXEC_WARNING(("DAX in ASI_STREAM_MA\n")); | |
1823 | goto data_access_exception; | |
1824 | ||
1825 | } | |
1826 | ||
1827 | switch (rv) { | |
1828 | case MA_STREAM_DONE: | |
1829 | break; | |
1830 | case MA_STREAM_MEM_ALIGN_TRAP: | |
1831 | EXEC_WARNING(("Mem align error in " | |
1832 | "ASI_STREAM_MA store\n")); | |
1833 | goto data_access_exception; | |
1834 | case MA_STREAM_DATA_ACCESS_EX_TRAP: | |
1835 | EXEC_WARNING(("DAX in ASI_STREAM_MA store\n")); | |
1836 | goto data_access_exception; | |
1837 | case MA_STREAM_ILLEGAL_INST_TRAP: | |
1838 | v9p->post_precise_trap(sp, | |
1839 | Sparcv9_trap_illegal_instruction); | |
1840 | return; | |
1841 | case MA_STREAM_FATAL: | |
1842 | EXEC_WARNING(("fatal error during MA register " | |
1843 | "store")); | |
1844 | fatal("fatal error during MA register store"); | |
1845 | break; | |
1846 | default: | |
1847 | FIXME_WARNING(("invalid return code from MA " | |
1848 | "register store")); | |
1849 | } | |
1850 | } else if ((op & MA_Op_Mask) == MA_Ld || | |
1851 | (op & MA_Op_Mask) == MA_LdSigned) { | |
1852 | ||
1853 | uint_t rv; | |
1854 | ||
1855 | #ifdef LOG_ASI_STREAM_MA_ACCESSES | |
1856 | EXEC_WARNING(("Got a load from the MA_STREAM ASI, " | |
1857 | "addr=0x%llx\n", addr)); | |
1858 | #endif | |
1859 | ||
1860 | if (size != MA_Size64) { | |
1861 | EXEC_WARNING(("Load from ASI_STREAM_MA, " | |
1862 | "size not " | |
1863 | "64 bits: " | |
1864 | "pc=%p ASI: %x addr: %p, mod_arith \n", | |
1865 | sp->pc, asi, addr)); | |
1866 | goto data_access_exception; | |
1867 | } | |
1868 | ||
1869 | switch(addr) { | |
1870 | case 0x0: | |
1871 | rv = query_CWQ_head_reg(sp, &val); | |
1872 | break; | |
1873 | case 0x8: | |
1874 | rv = query_CWQ_tail_reg(sp, &val); | |
1875 | break; | |
1876 | case 0x10: | |
1877 | rv = query_CWQ_first_reg(sp, &val); | |
1878 | break; | |
1879 | case 0x18: | |
1880 | rv = query_CWQ_last_reg(sp, &val); | |
1881 | break; | |
1882 | case 0x20: | |
1883 | rv = query_CWQ_CSR_reg(sp, &val); | |
1884 | break; | |
1885 | /* note: no 0x28, as CSR.enable is write only */ | |
1886 | case 0x30: | |
1887 | rv = query_CWQ_sync_reg(sp, &val); | |
1888 | break; | |
1889 | case 0x80: | |
1890 | rv = query_MA_ctl_reg(sp, &val); | |
1891 | break; | |
1892 | case 0x88: | |
1893 | rv = query_MA_physad_reg(sp, &val); | |
1894 | break; | |
1895 | case 0x90: | |
1896 | rv = query_MA_memad_reg(sp, &val); | |
1897 | break; | |
1898 | case 0x98: | |
1899 | rv = query_MA_nprime_reg(sp, &val); | |
1900 | break; | |
1901 | case 0xa0: | |
1902 | rv = query_MA_sync_reg(sp, &val); | |
1903 | break; | |
1904 | default: | |
1905 | EXEC_WARNING(("DAX in ASI_STREAM_MA load\n")); | |
1906 | goto data_access_exception; | |
1907 | } | |
1908 | ||
1909 | switch (rv) { | |
1910 | case MA_STREAM_LD_COMPLETE: | |
1911 | #if LOG_ASI_STREAM_MA_ACCESSES | |
1912 | EXEC_WARNING(("Got a load from the " | |
1913 | "MA_STREAM ASI," | |
1914 | "addr=0x%llx, returned 0x%llx\n", | |
1915 | addr, val)); | |
1916 | #endif | |
1917 | goto load_complete; | |
1918 | case MA_STREAM_MEM_ALIGN_TRAP: | |
1919 | EXEC_WARNING(("Alignment error in " | |
1920 | "ASI_STREAM_MA load\n")); | |
1921 | goto data_access_exception; | |
1922 | case MA_STREAM_DATA_ACCESS_EX_TRAP: | |
1923 | EXEC_WARNING(("DAX in ASI_STREAM_MA\n")); | |
1924 | goto data_access_exception; | |
1925 | case MA_STREAM_ILLEGAL_INST_TRAP: | |
1926 | v9p->post_precise_trap(sp, | |
1927 | Sparcv9_trap_illegal_instruction); | |
1928 | return; | |
1929 | case MA_STREAM_FATAL: | |
1930 | IMPL_WARNING(("fatal error during MA register " | |
1931 | "load")); | |
1932 | fatal("fatal error during ASI_STREAM_MA " | |
1933 | "register load"); | |
1934 | return; /* control won't actually get here */ | |
1935 | default: | |
1936 | fatal("Unexpected rv during ASI_STREAM_MA " | |
1937 | "register load"); | |
1938 | } | |
1939 | ||
1940 | } else { | |
1941 | EXEC_WARNING(("Illegal memory operation 0x%x to " | |
1942 | "STREAM ASI pc=%p ASI: %x addr: %p, " | |
1943 | "mod_arith \n", op, sp->pc, asi, addr)); | |
1944 | goto data_access_exception; | |
1945 | } | |
1946 | break; | |
1947 | ||
1948 | #endif /* INTERNAL_BUILD } */ | |
1949 | ||
1950 | case SS_ASI_CMP: /* 0x41 R 0 S Core Available */ | |
1951 | /* 0x41 R 10 S Core Enable Status */ | |
1952 | /* 0x41 RW 20 S Core Enable */ | |
1953 | /* 0x41 RW 30 S XIR Steering */ | |
1954 | /* 0x41 RW 38 S Tick Enable */ | |
1955 | /* 0x41 RW 40 S Error Steering, not implemented */ | |
1956 | /* 0x41 RW 50 S Core Running RW */ | |
1957 | /* 0x41 R 58 S Core Running Status */ | |
1958 | /* 0x41 W 60 S Core Running W1S */ | |
1959 | /* 0x41 W 68 S Core Running W1C */ | |
1960 | ||
1961 | if (is_load) { | |
1962 | switch(addr) { | |
1963 | case 0x0: | |
1964 | case 0x10: | |
1965 | case 0x20: | |
1966 | val = npp->cmp_regs.core_enable_status; | |
1967 | goto load_complete; | |
1968 | case 0x30: | |
1969 | IMPL_WARNING(("asi_xir_steering (asi: 0x%lx va: 0x%lx) not implemented\n",asi, addr)); | |
1970 | goto load_complete; | |
1971 | case 0x38: | |
1972 | val = npp->cmp_regs.tick_enable ? 1 : 0; | |
1973 | goto load_complete; | |
1974 | case 0x50: | |
1975 | case 0x58: | |
1976 | val = npp->cmp_regs.core_running_status; | |
1977 | goto load_complete; | |
1978 | case 0x60: | |
1979 | case 0x68: | |
1980 | /* | |
1981 | * ASI_CORE_RUNNING_{W1S, W1C}, write-only | |
1982 | */ | |
1983 | goto data_access_exception; | |
1984 | default: | |
1985 | break; | |
1986 | } | |
1987 | } else { | |
1988 | switch(addr) { | |
1989 | case 0x0: | |
1990 | case 0x10: | |
1991 | case 0x58: | |
1992 | goto data_access_exception; | |
1993 | case 0x20: | |
1994 | /* | |
1995 | * asi_core_enable | |
1996 | */ | |
1997 | IMPL_WARNING(("asi_core_enable: (asi: 0x%lx va: 0x%lx) not supported\n",asi, addr)); | |
1998 | goto complete; | |
1999 | case 0x30: | |
2000 | IMPL_WARNING(("asi_xir_steering (asi: 0x%lx va: 0x%lx) not implemented\n",asi, addr)); | |
2001 | goto complete; | |
2002 | case 0x38: | |
2003 | #ifdef VFALLS /* { */ | |
2004 | /* For multinode systems, should not be using ASI_CMP_TICK_ENABLE */ | |
2005 | if (sp->config_procp->domainp->procs.count > 1) | |
2006 | EXEC_WARNING(("For multinode systems, use of ASI_CMP_TICK_ENABLE(asi: 0x%lx va: 0x%lx)\n" | |
2007 | "is not recommended for tick sync purposes(VF PRM 0.1 Sec 3.1.1).\n", | |
2008 | asi, addr)); | |
2009 | #endif /* } VFALLS */ | |
2010 | RSVD_MASK(sp, MASK64(0,0), val, asi, addr); | |
2011 | pthread_mutex_lock(&npp->tick_en_lock); | |
2012 | #ifdef VFALLS | |
2013 | if (!npp->ncxp->tick_en_slow) | |
2014 | #endif | |
2015 | if (!val && !npp->tick_stop) { | |
2016 | sparcv9_cpu_t * tv9p; | |
2017 | simcpu_t * tsp; | |
2018 | ss_strand_t * tnsp; | |
2019 | ||
2020 | npp->tick_stop = true; | |
2021 | ||
2022 | /* now stop all tick counters */ | |
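| /* strands on a physical core share a tick counter, so adjust the offset only once per core (tracked via core_num) before recomputing each strand's tick target */ | |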
2023 | core_num = (uint_t) -1; | |
2024 | for (idx = 0; idx < npp->nstrands; idx++) { | |
2025 | tv9p = npp->strand[idx]; | |
2026 | tnsp = &(npp->ss_strandp[idx]); | |
2027 | if (tnsp->core != core_num) { | |
2028 | tv9p->tick->offset += RAW_TICK(tv9p); | |
2029 | core_num = tnsp->core; | |
2030 | } | |
2031 | tsp = tv9p->simp; | |
2032 | ss_recomp_tick_target(tsp); | |
2033 | ||
2034 | } | |
2035 | } | |
2036 | if (val && npp->tick_stop) { | |
2037 | sparcv9_cpu_t * tv9p; | |
2038 | simcpu_t * tsp; | |
2039 | ss_strand_t * tnsp; | |
2040 | uint thread; | |
2041 | ||
2042 | npp->tick_stop = false; | |
2043 | ||
2044 | /* now start all tick counters */ | |
2045 | core_num = (uint_t) -1; | |
2046 | for (idx = 0; idx < npp->nstrands; idx++) { | |
2047 | tv9p = npp->strand[idx]; | |
2048 | tnsp = &(npp->ss_strandp[idx]); | |
2049 | if (tnsp->core != core_num) { | |
2050 | tv9p->tick->offset -= RAW_TICK(tv9p); | |
2051 | core_num = tnsp->core; | |
2052 | } | |
2053 | tsp = tv9p->simp; | |
2054 | ss_recomp_tick_target(tsp); | |
2055 | ||
2056 | } | |
2057 | } | |
2058 | ||
2059 | npp->cmp_regs.tick_enable = val ? true : false; | |
2060 | pthread_mutex_unlock(&npp->tick_en_lock); | |
2061 | goto complete; | |
2062 | case 0x50: | |
2063 | /* | |
2064 | * WS: according to the CMP PRM, writing a '1' to a bit will be ignored | |
2065 | * if the corresponding bit in the core enable reg is 0 (i.e., the | |
2066 | * corresponding virtual core is not enabled) | |
2067 | */ | |
2068 | pthread_mutex_lock(&npp->cmp_lock); | |
2069 | npp->cmp_regs.core_running_status = val & npp->cmp_regs.core_enable_status; | |
2070 | ss_change_exec_state(npp, npp->cmp_regs.core_running_status); | |
2071 | pthread_mutex_unlock(&npp->cmp_lock); | |
2072 | goto complete; | |
2073 | case 0x60: | |
2074 | /* | |
2075 | * W1S: new_value = old_value | new_value; | |
2076 | */ | |
2077 | pthread_mutex_lock(&npp->cmp_lock); | |
2078 | npp->cmp_regs.core_running_status |= val; | |
2079 | /* | |
2080 | * According to the CMP PRM, writing a '1' to a bit will be ignored | |
2081 | * if the corresponding bit in the core enable reg is 0 (i.e., the | |
2082 | * corresponding virtual core is not enabled) | |
2083 | */ | |
2084 | npp->cmp_regs.core_running_status &= npp->cmp_regs.core_enable_status; | |
2085 | ||
2086 | /* | |
2087 | * FIXME: need to check if the virtual core is attempting to park | |
2088 | * all the virtual cores (this is prevented by the hardware) | |
2089 | */ | |
2090 | ss_change_exec_state(npp, npp->cmp_regs.core_running_status); | |
2091 | pthread_mutex_unlock(&npp->cmp_lock); | |
2092 | goto complete; | |
2093 | case 0x68: | |
2094 | /* | |
2095 | * W1C: new_value = old_value & ~new_value; | |
2096 | */ | |
2097 | pthread_mutex_lock(&npp->cmp_lock); | |
2098 | npp->cmp_regs.core_running_status &= ~val; | |
2099 | ss_change_exec_state(npp, npp->cmp_regs.core_running_status); | |
2100 | pthread_mutex_unlock(&npp->cmp_lock); | |
2101 | goto complete; | |
2102 | default: | |
2103 | break; | |
2104 | } | |
2105 | } | |
2106 | goto data_access_exception; | |
2107 | ||
2108 | case SS_ASI_LSU_DIAG_REG: /* 0x42 RW 0 N Sparc BIST control register */ /* SPARC_BIST_CONTROL */ | |
2109 | /* 0x42 RW 8 N Sparc Instruction Mask Register */ /* INST_MASK_REG */ | |
2110 | /* 0x42 RW 10 N Load/Store Unit Diagnostic Register */ /* LSU_DIAG_REG */ | |
2111 | ||
2112 | if (is_load) { | |
2113 | switch(addr) { | |
2114 | case 0x0: | |
2115 | val = nsp->icachep->bist_ctl; | |
2116 | goto load_complete; | |
2117 | case 0x8: | |
2118 | val = nsp->icachep->inst_mask; | |
2119 | goto load_complete; | |
2120 | case 0x10: | |
2121 | val = (nsp->dcachep->assocdis ? 2 : 0) | | |
2122 | (nsp->icachep->assocdis ? 1 : 0); | |
2123 | goto load_complete; | |
2124 | default: | |
2125 | break; | |
2126 | } | |
2127 | } else { | |
2128 | switch(addr) { | |
2129 | case 0x0: | |
2130 | nsp->icachep->bist_ctl = val & 0x7f; | |
2131 | if (val & 1) nsp->icachep->bist_ctl |= 0x400; | |
2132 | goto complete; | |
2133 | case 0x8: | |
2134 | nsp->icachep->inst_mask = val; | |
2135 | goto complete; | |
2136 | case 0x10: | |
2137 | if (val & 2) nsp->dcachep->assocdis = true; | |
2138 | if (val & 1) nsp->icachep->assocdis = true; | |
2139 | goto complete; | |
2140 | default: | |
2141 | break; | |
2142 | } | |
2143 | } | |
2144 | goto data_access_exception; | |
2145 | ||
2146 | case SS_ASI_ERROR_INJECT_REG: /* 0x43 RW 0 N Sparc Error Injection Register */ | |
2147 | if (addr != 0) | |
2148 | goto data_access_exception; | |
2149 | /* TODO: provide per-core field to store this */ | |
2150 | if (is_load) { | |
2151 | val = 0; | |
2152 | goto load_complete; | |
2153 | } else { | |
2154 | if ((val & BIT(31)) != 0) | |
2155 | IMPL_WARNING(("ASI_ERROR_INJECT_REG not " | |
2156 | "implemented (pc=0x%llx)", sp->pc)); | |
2157 | goto complete; | |
2158 | } | |
2159 | ||
2160 | case SS_ASI_LSU_CONTROL_REG: /* 0x45 RW 0 Y Load/Store Unit Control Register */ | |
2161 | switch(addr) { | |
2162 | case 0x0: | |
2163 | if (is_load) { | |
2164 | val = (nsp->lsu_control_raw & ~(LSU_CTRL_DMMU_EN | LSU_CTRL_IMMU_EN)) | | |
2165 | (nsp->dmmu.enabled ? LSU_CTRL_DMMU_EN : 0LL) | | |
2166 | (nsp->immu.enabled ? LSU_CTRL_IMMU_EN : 0LL); | |
2167 | goto load_complete; | |
2168 | } else { | |
2169 | /* | |
2170 | * can only issue this in hpriv mode, so even though we turn the mmu | |
2171 | * on and off, we don't need to flush the x and d translation caches | |
2172 | * because in hpriv mode we're only fetching physical addresses. | |
2173 | */ | |
2174 | ASSERT( V9_RED == v9p->state || V9_HyperPriv == v9p->state ); | |
2175 | ||
2176 | val &= LSU_CTRL_REG_MASK; | |
2177 | if ((val & (LSU_CTRL_WATCH_RE|LSU_CTRL_WATCH_WE)) != 0) { | |
2178 | IMPL_WARNING(("ASI_LSU_CONTROL_REG watchpoint enable unimplemented @ pc=%lx\n", sp->pc)); | |
2179 | } | |
2180 | nsp->lsu_control_raw = val; | |
2181 | nsp->dmmu.enabled = (val & LSU_CTRL_DMMU_EN) != 0; | |
2182 | nsp->immu.enabled = (val & LSU_CTRL_IMMU_EN) != 0; | |
2183 | sp->xicache_trans_flush_pending = true; | |
2184 | sp->xdcache_trans_flush_pending = true; | |
2185 | } | |
2186 | break; | |
2187 | case 0x8: /* 0x45 RW 8 N ASI_DECR */ | |
2188 | case 0x18: /* 0x45 RW 18 N ASI_RST_VEC_MASK */ | |
2189 | ITODO(SS_ASI_LSU_CONTROL_REG); | |
2190 | if (is_load) { | |
2191 | val = 0; | |
2192 | goto load_complete; | |
2193 | } | |
2194 | break; | |
2195 | default: | |
2196 | goto data_access_exception; | |
2197 | } | |
2198 | break; | |
2199 | ||
2200 | case SS_ASI_DCACHE_DATA: /* 0x46 RW - N Dcache data array diagnostics access */ | |
2201 | ||
2202 | if (is_load) { | |
2203 | uint64_t idx, lineword, tag; | |
2204 | ||
2205 | /* L1 D-Cache Diagnostic Access Section 28.6 of N2 PRM 1.1 */ | |
2206 | lineword = addr&SS_DCACHE_DATA_BITS; | |
2207 | tag = (addr&SS_DCACHE_DATA_TAG_BITS)>>10; | |
2208 | ||
2209 | RW_rdlock(&nsp->dcachep->rwlock); | |
2210 | /* | |
2211 | * must match tag to load data | |
2212 | * iterate over 4 ways at bits [12:11] | |
2213 | */ | |
2214 | for (idx=lineword+0x1800; idx>=lineword; idx-=0x800) { | |
2215 | if (nsp->dcachep->tagp[idx] == tag) { | |
2216 | val = nsp->dcachep->datap[idx]; | |
2217 | break; | |
2218 | } | |
2219 | EXEC_WARNING( ("ASI_DCACHE_DATA load tag 0x%llx has no match", | |
2220 | addr&SS_DCACHE_DATA_TAG_BITS) ); | |
2221 | if (idx < 0x800) | |
2222 | break; | |
2223 | } | |
2224 | RW_unlock(&nsp->dcachep->rwlock); | |
2225 | goto load_complete; | |
2226 | } else { | |
2227 | uint64_t idx; | |
2228 | ||
2229 | /* L1 D-Cache Diagnostic Access Section 28.6 of N2 PRM 1.1 */ | |
2230 | idx = (addr&SS_DCACHE_DATA_BITS)>>3; | |
2231 | ||
2232 | RW_wrlock(&nsp->dcachep->rwlock); | |
2233 | nsp->dcachep->datap[idx] = val; | |
2234 | RW_unlock(&nsp->dcachep->rwlock); | |
2235 | goto complete; | |
2236 | } | |
2237 | ||
2238 | case SS_ASI_DCACHE_TAG: /* 0x47 RW - N Dcache tag and valid bit diagnostics access */ | |
2239 | ||
2240 | if (is_load) { | |
2241 | uint64_t idx; | |
2242 | ||
2243 | /* L1 D-Cache Tag Diagnostic Access Section 18.3 of PRM 1.2 */ | |
2244 | idx = (addr&SS_DCACHE_TAG_WAYLINE_BITS)>>4; | |
2245 | ||
2246 | RW_rdlock(&nsp->dcachep->rwlock); | |
2247 | val = nsp->dcachep->tagp[idx]; | |
2248 | RW_unlock(&nsp->dcachep->rwlock); | |
2249 | goto load_complete; | |
2250 | } else { | |
2251 | uint64_t idx; | |
2252 | ||
2253 | /* L1 D-Cache Tag Diagnostic Access Section 18.3 of PRM 1.2 */ | |
2254 | idx = (addr&SS_DCACHE_TAG_WAYLINE_BITS)>>4; | |
2255 | ||
2256 | RW_wrlock(&nsp->dcachep->rwlock); | |
2257 | nsp->dcachep->tagp[idx] = val; | |
2258 | RW_unlock(&nsp->dcachep->rwlock); | |
2259 | goto complete; | |
2260 | } | |
2261 | ||
2262 | case N2_ASI_IRF_ECC_REG: /* 0x48 RO 0-F8 Y IRF ECC diagnostic access */ | |
2263 | if (!is_load) goto data_access_exception; | |
2264 | val = 0; | |
2265 | goto load_complete; | |
2266 | ||
2267 | case N2_ASI_FRF_ECC_REG: /* 0x49 RO 0-F8 Y FRF ECC diagnostic access */ | |
2268 | if (!is_load) goto data_access_exception; | |
2269 | val = 0; | |
2270 | goto load_complete; | |
2271 | ||
2272 | case N2_ASI_STB_ACCESS: /* 0x4A RO 0-1F8 Y Store buffer diagnostic access */ | |
2273 | if (!is_load) goto data_access_exception; | |
2274 | val = 0; | |
2275 | goto load_complete; | |
2276 | ||
2277 | case N2_ASI_DESR: /* 0x4C R 0 Y Disrupting error status | |
2278 | R 8 Y Deferred error status | |
2279 | RW 10 N Core error reporting enable | |
2280 | RW 18 Y Core error trap enable | |
2281 | R 20 Y Core local error status | |
2282 | R 28 Y Core local error status */ | |
2283 | /* handled by ss_error_asi_noop_access */ | |
2284 | goto data_access_exception; | |
2285 | ||
2286 | case N2_ASI_SPACE_PWR_MGMT: /* 0x4E RW 0 Y Sparc power management */ | |
2287 | core_num = nsp->core; | |
2288 | if (is_load) { | |
2289 | switch(addr) { | |
2290 | case 0x0: | |
2291 | val = npp->sparc_power_mgmtp[core_num]; | |
2292 | goto load_complete; | |
2293 | default: | |
2294 | break; | |
2295 | } | |
2296 | } else { | |
2297 | switch(addr) { | |
2298 | case 0x0: | |
2299 | npp->sparc_power_mgmtp[core_num] = (val & MASK64(15,0)); | |
2300 | goto complete; | |
2301 | default: | |
2302 | break; | |
2303 | } | |
2304 | } | |
2305 | goto data_access_exception; | |
2306 | ||
2307 | case SS_ASI_HYP_SCRATCHPAD: | |
2308 | /* | |
2309 | * Niagara1/N2 : | |
2310 | * 0x4F RW 0-38 Y Hypervisor Scratchpad | |
2311 | * Rock : | |
2312 | * 0x4F RW 0-18 Y Hypervisor Scratchpad | |
2313 | */ | |
2314 | ||
2315 | if (INVALID_HYP_SCRATCHPAD(addr)) { | |
2316 | goto data_access_exception; | |
2317 | } else { | |
2318 | uint64_t *valp = | |
2319 | &(nsp->strand_reg[SSR_HSCRATCHPAD_INDEX + (addr>>3)]); | |
2320 | if (is_load) { | |
2321 | val = *valp; | |
2322 | goto load_complete; | |
2323 | } | |
2324 | DBGSCRATCH( if (*valp != val) | |
2325 | lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: " | |
2326 | "0x%llx -> 0x%llx pc=0x%llx\n", | |
2327 | asi, addr, *valp, val, sp->pc); ); | |
2328 | *valp = val; | |
2329 | } | |
2330 | break; | |
2331 | ||
2332 | case SS_ASI_IMMU: /* 0x50 R 0 Y IMMU Tag Target register */ | |
2333 | /* 0x50 RW 18 Y IMMU Synchronous Fault Status Register */ | |
2334 | /* 0x50 RW 30 Y IMMU TLB Tag Access Register */ | |
2335 | /* 0x50 RW 38 Y IMMU VA Data Watchpoint Register */ | |
2336 | mmup = &(nsp->immu); | |
2337 | ||
2338 | if (is_load) { | |
2339 | switch(addr) { | |
2340 | case 0x0: | |
2341 | tag_target_read:; | |
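| /* tag target format: context from tag access bits [12:0] is placed in bits [63:48], VA[63:22] in the low bits */ | |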
2342 | val = (mmup->tag_access_reg >> 22) | ((mmup->tag_access_reg&MASK64(12,0))<<48); | |
2343 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2344 | goto load_complete; | |
2345 | case 0x18: | |
2346 | val = nsp->error.isfsr; | |
2347 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2348 | goto load_complete; | |
2349 | case 0x30: | |
2350 | tag_access_read:; | |
2351 | val = mmup->tag_access_reg; | |
2352 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2353 | VA48_ASSERT(val); | |
2354 | goto load_complete; | |
2355 | case 0x38: | |
2356 | val = mmup->watchpoint; | |
2357 | goto load_complete; | |
2358 | default: | |
2359 | break; | |
2360 | } | |
2361 | } else { | |
2362 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2363 | switch(addr) { | |
2364 | case 0x18: | |
2365 | nsp->error.isfsr = val & MMU_SFSR_MASK; | |
2366 | sp->xicache_trans_flush_pending = true; | |
2367 | goto complete; | |
2368 | case 0x30: | |
2369 | tag_access_write:; | |
2370 | mmup->tag_access_reg = VA48(val); | |
2371 | goto complete; | |
2372 | case 0x38: | |
2373 | mmup->watchpoint = VA48(val); | |
2374 | goto complete; | |
2375 | default: | |
2376 | break; | |
2377 | } | |
2378 | } | |
2379 | goto data_access_exception; | |
2380 | ||
2381 | case N2_ASI_MRA_ACCESS: /* 0x51 RO 0-38 Y HWTW MRA Access */ | |
2382 | if (!is_load) goto data_access_exception; | |
2383 | val = 0; | |
2384 | goto load_complete; | |
2385 | ||
2386 | case N2_ASI_MMU_REAL_RANGE: /* 0x52 RW 108-120 Y MMU TSB Real Range | |
2387 | 208-220 Y MMU TSB Physical Offset */ | |
2388 | if (is_load) { | |
2389 | switch(addr) { | |
2390 | case 0x108: | |
2391 | case 0x110: | |
2392 | case 0x118: | |
2393 | case 0x120: | |
2394 | idx = ((addr - 0x108) >> 3) & 0x3; | |
2395 | val = nsp->real_range_reg[idx]; | |
2396 | goto load_complete; | |
2397 | case 0x208: | |
2398 | case 0x210: | |
2399 | case 0x218: | |
2400 | case 0x220: | |
2401 | idx = ((addr - 0x208) >> 3) & 0x3; | |
2402 | val = nsp->phy_off_reg[idx]; | |
2403 | goto load_complete; | |
2404 | default: | |
2405 | break; | |
2406 | } | |
2407 | } else { | |
2408 | DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); | |
2409 | switch(addr) { | |
2410 | case 0x108: | |
2411 | case 0x110: | |
2412 | case 0x118: | |
2413 | case 0x120: | |
2414 | idx = ((addr - 0x108) >> 3) & 0x3; | |
2415 | nsp->real_range_reg[idx] = val; | |
2416 | goto complete; | |
2417 | case 0x208: | |
2418 | case 0x210: | |
2419 | case 0x218: | |
2420 | case 0x220: | |
2421 | idx = ((addr - 0x208) >> 3) & 0x3; | |
2422 | nsp->phy_off_reg[idx] = val; | |
2423 | goto complete; | |
2424 | default: | |
2425 | break; | |
2426 | } | |
2427 | } | |
2428 | goto data_access_exception; | |
2429 | ||
2430 | case N2_ITLB_PROBE: /* 0x53 R 0 N ITLB Probe */ | |
2431 | if (!is_load) goto data_access_exception; | |
2432 | TODO(N2_ITLB_PROBE); | |
2433 | break; | |
2434 | ||
2435 | case SS_ASI_ITLB_DATA_IN_REG: /* 0x54 W 0,400 N IMMU data in register */ | |
2436 | /* 0x54 RW 30-48 N Ctxt nonzero TSB config 0-3 register */ | |
2437 | /* 0x54 RW 30-40 N Ctxt nonzero TSB config 0-3 register */ | |
2438 | /* 0x54 R 50-68 N ITSB pointer 0-3 register */ | |
2439 | /* 0x54 R 70-88 N DTSB pointer 0-3 register */ | |
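| /* TSB pointer reads (0x50-0x88) select the zero- or nonzero-context TSB config set according to whether the context in the tag access register is zero */ | |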
2440 | tlbp = nsp->itlbp; | |
2441 | ||
2442 | if (is_load) { | |
2443 | switch(addr) { | |
2444 | case 0x10: | |
2445 | case 0x18: | |
2446 | case 0x20: | |
2447 | case 0x28: | |
2448 | idx = ((addr - 0x10) >> 3) & 0x3; | |
2449 | val = nsp->mmu_zero_ctxt_tsb_config[idx].data; | |
2450 | goto load_complete; | |
2451 | case 0x30: | |
2452 | case 0x38: | |
2453 | case 0x40: | |
2454 | case 0x48: | |
2455 | idx = ((addr - 0x30) >> 3) & 0x3; | |
2456 | val = nsp->mmu_nonzero_ctxt_tsb_config[idx].data; | |
2457 | goto load_complete; | |
2458 | case 0x50: | |
2459 | case 0x58: | |
2460 | case 0x60: | |
2461 | case 0x68: | |
2462 | mmup = &(nsp->immu); | |
2463 | idx = ((addr - 0x50) >> 3) & 0x3; | |
2464 | if ((mmup->tag_access_reg & MASK64(12,0)) == 0) | |
2465 | tsbinfop = &nsp->mmu_zero_ctxt_tsb_config[idx]; | |
2466 | else | |
2467 | tsbinfop = &nsp->mmu_nonzero_ctxt_tsb_config[idx]; | |
2468 | val = ss_make_tsb_pointer(mmup->tag_access_reg, tsbinfop); | |
2469 | DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (%cTSB PTR%d) (pc=0x%llx)\n", asi, addr, val, mmup->is_immu ? 'I' : 'D', idx, sp->pc); ); | |
2470 | goto load_complete; | |
2471 | case 0x70: | |
2472 | case 0x78: | |
2473 | case 0x80: | |
2474 | case 0x88: | |
2475 | mmup = &(nsp->dmmu); | |
2476 | idx = ((addr - 0x70) >> 3) & 0x3; | |
2477 | if ((mmup->tag_access_reg & MASK64(12,0)) == 0) | |
2478 | tsbinfop = &nsp->mmu_zero_ctxt_tsb_config[idx]; | |
2479 | else | |
2480 | tsbinfop = &nsp->mmu_nonzero_ctxt_tsb_config[idx]; | |
2481 | val = ss_make_tsb_pointer(mmup->tag_access_reg, tsbinfop); | |
2482 | DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (%cTSB PTR%d) (pc=0x%llx)\n", asi, addr, val, mmup->is_immu ? 'I' : 'D', idx, sp->pc); ); | |
2483 | goto load_complete; | |
2484 | default: | |
2485 | break; | |
2486 | } | |
2487 | } else { | |
2488 | mmup = &(nsp->immu); | |
2489 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2490 | ||
2491 | switch(addr) { | |
2492 | case 0x0: | |
2493 | case 0x400: | |
2494 | { | |
2495 | uint_t flags_unused = 0; | |
2496 | uint64_t pa_offset_unused = 0; | |
2497 | /* | |
2498 | * addr=0x0: 'Real' bit<10> = 0, load VA->PA translation into TLB | |
2499 | * addr=0x400: 'Real' bit<10> = 1, load RA->PA translation into TLB | |
2500 | */ | |
2501 | is_real = SS_TLB_IS_REAL(addr); | |
2502 | ||
2503 | if (ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val, &flags_unused, &pa_offset_unused) == SS_trap_NONE) | |
2504 | goto complete; | |
2505 | ||
2506 | goto data_access_exception; | |
2507 | } | |
2508 | case 0x10: | |
2509 | case 0x18: | |
2510 | case 0x20: | |
2511 | case 0x28: | |
2512 | idx = ((addr - 0x10) >> 3) & 0x3; | |
2513 | tsbinfop = &nsp->mmu_zero_ctxt_tsb_config[idx]; | |
2514 | niagara2_write_tsb_config(sp, tsbinfop, val); | |
2515 | goto complete; | |
2516 | case 0x30: | |
2517 | case 0x38: | |
2518 | case 0x40: | |
2519 | case 0x48: | |
2520 | idx = ((addr - 0x30) >> 3) & 0x3; | |
2521 | tsbinfop = &nsp->mmu_nonzero_ctxt_tsb_config[idx]; | |
2522 | niagara2_write_tsb_config(sp, tsbinfop, val); | |
2523 | goto complete; | |
2524 | default: | |
2525 | break; | |
2526 | } | |
2527 | } | |
2528 | goto data_access_exception; | |
2529 | ||
2530 | ||
2531 | case SS_ASI_ITLB_DATA_ACCESS_REG: /* 0x55 RW 0-1F8,400-5f8 N IMMU TLB Data Access Register */ | |
2532 | tlbp = nsp->itlbp; | |
2533 | mmup = &(nsp->immu); | |
2534 | tlb_data_access:; | |
2535 | idx = (addr >> 3) & 0x7f; | |
2536 | if (is_load) { | |
2537 | tlb_entry_t * tep; | |
2538 | ||
2539 | if (idx >= tlbp->nentries) goto data_access_exception; | |
2540 | #if ERROR_INJECTION | |
2541 | if (tlb_data_access_error_match(sp, mmup, idx)) | |
2542 | return; | |
2543 | #endif | |
2544 | RW_rdlock(&tlbp->rwlock); | |
2545 | tep = &tlbp->tlb_entryp[idx]; | |
2546 | val = tep->data; | |
2547 | RW_unlock(&tlbp->rwlock); | |
2548 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2549 | goto load_complete; | |
2550 | } else { | |
2551 | is_real = SS_TLB_IS_REAL(addr); | |
2552 | if (ss_tlb_insert_idx(sp, mmup, tlbp, nsp->partid, | |
2553 | is_real, val, idx) == SS_trap_NONE) | |
2554 | goto complete; | |
2555 | ||
2556 | goto data_access_exception; | |
2557 | } | |
2558 | ||
2559 | case SS_ASI_ITLB_TAG_READ_REG: /* 0x56 R 0-1F8,400-5F8 N IMMU TLB Tag Read Register */ | |
2560 | tlbp = nsp->itlbp; | |
2561 | mmup = &(nsp->immu); | |
2562 | tlb_tag_read:; | |
2563 | if (is_load) { | |
2564 | tlb_entry_t * tep; | |
2565 | ||
2566 | idx = (addr >> 3) & 0x7f; | |
2567 | if (idx >= tlbp->nentries) goto data_access_exception; | |
2568 | #if ERROR_INJECTION | |
2569 | if (tlb_tag_access_error_match(sp, mmup, idx)) | |
2570 | return; | |
2571 | #endif | |
2572 | RW_rdlock(&tlbp->rwlock); | |
2573 | tep = &tlbp->tlb_entryp[idx]; | |
2574 | val = ((uint64_t)tep->partid << 61) | | |
2575 | ((uint64_t)(tep->is_real?1:0) << 60); | |
2576 | val |= (tep->tag_pfn & MASK64(47, 13)) | (uint64_t)tep->tag_context; | |
2577 | /* TODO: Return Parity and Used bits when implemented. */ | |
2578 | RW_unlock(&tlbp->rwlock); | |
2579 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2580 | goto load_complete; | |
2581 | } | |
2582 | goto data_access_exception; | |
2583 | ||
2584 | case SS_ASI_IMMU_DEMAP: /* 0x57 W 0 Y IMMU TLB Demap */ | |
2585 | mmup = &(nsp->immu); | |
2586 | tlbp = nsp->itlbp; | |
2587 | tlb_demap:; | |
2588 | if (is_load) goto data_access_exception; | |
2589 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2590 | { | |
2591 | ss_demap_t op; | |
2592 | uint_t context; | |
2593 | ||
2594 | op = (ss_demap_t) ((addr>>6)&0x3); | |
2595 | ||
2596 | switch ((addr >> 4)&0x3) { | |
2597 | case 0x0: context = nsp->pri_context; break; /* primary context */ | |
2598 | case 0x1: context = nsp->sec_context; /* secondary context */ | |
2599 | /* | |
2600 | * immu doesn't support secondary context encoding for | |
2601 | * demap page and demap context ops (causing demap to be ignored) | |
2602 | */ | |
2603 | if ((mmup->is_immu) && (op==NA_demap_page || op==NA_demap_context)) | |
2604 | goto demap_noop; | |
2605 | break; | |
2606 | case 0x2: context = SS_NUCLEUS_CONTEXT; break; /* nucleus context */ | |
2607 | case 0x3: | |
2608 | /* | |
2609 | * use of reserved value is valid but causes | |
2610 | * demap to be ignored for the following two ops | |
2611 | */ | |
2612 | if (op==NA_demap_page || op==NA_demap_context) { | |
2613 | demap_noop: | |
2614 | EXEC_WARNING(("(@pc=0x%llx) demap noop " | |
2615 | "asi=0x%x va=0x%llx", sp->pc, asi, addr)); | |
2616 | goto complete; | |
2617 | } | |
2618 | } | |
2619 | ||
2620 | if (op == NA_demap_page) { | |
2621 | if ((addr & BIT(47)) == 0) { | |
2622 | if ((addr & MASK64(63, 48)) != 0) { | |
2623 | EXEC_WARNING(("(@pc=0x%llx) demap " | |
2624 | "address range " | |
2625 | "asi=0x%x va=0x%llx", | |
2626 | sp->pc, asi, addr)); | |
2627 | } | |
2628 | addr &= MASK64(47, 0); | |
2629 | } else { | |
2630 | if ((addr & MASK64(63, 48)) != MASK64(63, 48)) { | |
2631 | EXEC_WARNING(("(@pc=0x%llx) demap " | |
2632 | "address range " | |
2633 | "asi=0x%x va=0x%llx", | |
2634 | sp->pc, asi, addr)); | |
2635 | } | |
2636 | addr |= MASK64(63, 48); | |
2637 | } | |
2638 | } | |
2639 | ||
2640 | is_real = SS_TLB_IS_REAL(addr); | |
2641 | if (!ss_demap(sp, op, mmup, tlbp, nsp->partid, is_real, context, addr)) goto data_access_exception; | |
2642 | } | |
2643 | goto complete; | |
2644 | ||
2645 | ||
2646 | case SS_ASI_DMMU: /* 0x58 R 0 Y D-MMU Tag Target Register */ | |
2647 | /* 0x58 RW 18 Y DMMU Synchronous Fault Status Register */ | |
2648 | /* 0x58 R 20 Y DMMU Synchronous Fault Address Register */ | |
2649 | /* 0x58 RW 30 Y DMMU TLB Tag Access Register */ | |
2650 | /* 0x58 RW 38 Y DMMU VA Data Watchpoint Register */ | |
2651 | /* 0x58 RW 40 Y Niagara 2: Tablewalk Config Reg */ | |
2652 | /* 0x58 RW 80 Y I/DMMU Partition ID */ | |
2653 | mmup = &(nsp->dmmu); | |
2654 | if (is_load) { | |
2655 | switch(addr) { | |
2656 | case 0x0: | |
2657 | goto tag_target_read; | |
2658 | case 0x18: | |
2659 | val = nsp->error.dsfsr; | |
2660 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2661 | goto load_complete; | |
2662 | case 0x20: | |
2663 | val = mmup->fault_addr; | |
2664 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2665 | VA48_ASSERT(val); | |
2666 | goto load_complete; | |
2667 | case 0x30: | |
2668 | goto tag_access_read; | |
2669 | case 0x38: | |
2670 | val = mmup->watchpoint; | |
2671 | goto load_complete; | |
2672 | case 0x40: | |
2673 | val = nsp->hwtw_config; | |
2674 | goto load_complete; | |
2675 | case 0x80: | |
2676 | val = (uint64_t)(nsp->partid); | |
2677 | goto load_complete; | |
2678 | default: | |
2679 | break; | |
2680 | } | |
2681 | } else { | |
2682 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2683 | switch(addr) { | |
2684 | case 0x18: | |
2685 | nsp->error.dsfsr = val & MMU_SFSR_MASK; | |
2686 | goto complete; | |
2687 | case 0x30: | |
2688 | goto tag_access_write; | |
2689 | case 0x38: | |
2690 | mmup->watchpoint = VA48(val); | |
2691 | goto complete; | |
2692 | case 0x40: | |
2693 | RSVD_MASK(sp, MASK64(1,0), val, asi, addr); | |
2694 | nsp->hwtw_config = val; | |
2695 | if (!val) | |
2696 | IMPL_WARNING(("Unimplemented hwtw config control bits 0x%x\n",val)); | |
2697 | goto complete; | |
2698 | case 0x80: | |
2699 | /* can only do in hypervisor mode - switching mode causes the xi/xd | |
2700 | * cache flush anyway | |
2701 | */ | |
2702 | nsp->partid = val & 0x7; /* three bits of part id only */ | |
2703 | sp->xicache_trans_flush_pending = true; | |
2704 | sp->xdcache_trans_flush_pending = true; | |
2705 | goto complete; | |
2706 | default: | |
2707 | break; | |
2708 | } | |
2709 | } | |
2710 | goto data_access_exception; | |
2711 | ||
2712 | case N2_SCRATCHPAD_ACCESS: /* 0x59 RO 0-78 Y Scratchpad Register Diagnostic Access */ | |
2713 | if (!is_load) goto data_access_exception; | |
2714 | val = 0; | |
2715 | goto load_complete; | |
2716 | case N2_TICK_ACCESS: /* 0x5A RO 0-8,10,20-30 Y Tick Register Diagnostic Access */ | |
2717 | if (!is_load) goto data_access_exception; | |
2718 | val = 0; | |
2719 | goto load_complete; | |
2720 | case N2_TSA_ACCESS: /* 0x5B RO 0-38 Y TSA Diagnostic Access */ | |
2721 | if (!is_load) goto data_access_exception; | |
2722 | val = 0; | |
2723 | goto load_complete; | |
2724 | case SS_ASI_DTLB_DATA_IN_REG: /* 0x5C W 0 N DMMU data in register */ | |
2725 | { | |
2726 | uint_t flags_unused; | |
2727 | uint64_t pa_offset_unused; | |
2728 | tlbp = nsp->dtlbp; | |
2729 | mmup = &(nsp->dmmu); | |
2730 | ||
2731 | if (is_load || (addr & ~SS_TLB_REAL_MASK)!=0) | |
2732 | goto data_access_exception; | |
2733 | ||
2734 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); | |
2735 | is_real = SS_TLB_IS_REAL(addr); | |
2736 | ||
2737 | if (ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val, &flags_unused, &pa_offset_unused) == SS_trap_NONE) | |
2738 | goto complete; | |
2739 | ||
2740 | goto data_access_exception; | |
2741 | } | |
2742 | case SS_ASI_DTLB_DATA_ACCESS_REG: /* 0x5D RW 0-7F8 N DMMU TLB Data Access Register */ | |
2743 | tlbp = nsp->dtlbp; | |
2744 | mmup = &(nsp->dmmu); | |
2745 | goto tlb_data_access; | |
2746 | ||
2747 | case SS_ASI_DTLB_TAG_READ_REG: /* 0x5E R 0-7F8 N DMMU TLB Tag Read Register */ | |
2748 | tlbp = nsp->dtlbp; | |
2749 | mmup = &(nsp->dmmu); | |
2750 | goto tlb_tag_read; | |
2751 | ||
2752 | case SS_ASI_DMMU_DEMAP: /* 0x5F W 0 Y DMMU TLB Demap */ | |
2753 | mmup = &(nsp->dmmu); | |
2754 | tlbp = nsp->dtlbp; | |
2755 | goto tlb_demap; | |
2756 | ||
2757 | case SS_ASI_CMP_CORE_INTR_ID: /* 0x63 R 0 Y Core Interrupt ID | |
2758 | 10 Y Core ID */ | |
2759 | if (!is_load) goto data_access_exception; | |
2760 | ||
2761 | switch(addr) { | |
2762 | case 0x0: | |
2763 | val = nsp->vcore_id; | |
2764 | goto load_complete; | |
2765 | case 0x10: | |
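| /* core ID register: max strand id within a core in bits [63:32], max strand id of the chip in bits [31:16], this strand's virtual core id in the low bits */ | |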
2766 | val = ((uint64_t)(STRANDSPERCORE - 1)<<32) | | |
2767 | ((STRANDS_PER_CHIP - 1)<<16) | | |
2768 | nsp->vcore_id; | |
2769 | goto load_complete; | |
2770 | default: | |
2771 | break; | |
2772 | } | |
2773 | goto data_access_exception; | |
2774 | ||
2775 | case SS_ASI_ICACHE_INSTR: /* 0x66 RW - N Icache data array diagnostics access */ | |
2776 | ||
2777 | if (is_load) { | |
2778 | uint64_t idx; | |
2779 | ||
2780 | /* L1 I-Cache Diagnostic Access Section 28.5 of N2 PRM 1.1 */ | |
2781 | idx = ((addr&SS_ICACHE_DATA_LINEWORD_BITS)|((addr&SS_ICACHE_DATA_WAY_BITS)>>3))>>3; | |
2782 | ||
2783 | RW_rdlock(&nsp->icachep->rwlock); | |
2784 | val = nsp->icachep->datap[idx]; | |
2785 | RW_unlock(&nsp->icachep->rwlock); | |
2786 | goto load_complete; | |
2787 | } else { | |
2788 | uint64_t idx; | |
2789 | ||
2790 | /* L1 I-Cache Diagnostic Access Section 28.5 of N2 PRM 1.1 */ | |
2791 | idx = ((addr&SS_ICACHE_DATA_LINEWORD_BITS)|((addr&SS_ICACHE_DATA_WAY_BITS)>>3))>>3; | |
2792 | ||
2793 | RW_wrlock(&nsp->icachep->rwlock); | |
2794 | nsp->icachep->datap[idx] = val; | |
2795 | RW_unlock(&nsp->icachep->rwlock); | |
2796 | goto complete; | |
2797 | } | |
2798 | ||
2799 | case SS_ASI_ICACHE_TAG: /* 0x67 RW - N Icache tag and valid bit diagnostics access */ | |
2800 | ||
2801 | if (is_load) { | |
2802 | uint64_t idx; | |
2803 | ||
2804 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ | |
2805 | idx = (((addr&SS_ICACHE_TAG_LINE_BITS)>>3)|((addr&SS_ICACHE_TAG_WAY_BITS)>>6))>>3; | |
2806 | ||
2807 | RW_rdlock(&nsp->icachep->rwlock); | |
2808 | val = nsp->icachep->tagp[idx]; | |
2809 | RW_unlock(&nsp->icachep->rwlock); | |
2810 | goto load_complete; | |
2811 | } else { | |
2812 | uint64_t idx; | |
2813 | ||
2814 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ | |
2815 | idx = (((addr&SS_ICACHE_TAG_LINE_BITS)>>3)|((addr&SS_ICACHE_TAG_WAY_BITS)>>6))>>3; | |
2816 | ||
2817 | RW_wrlock(&nsp->icachep->rwlock); | |
2818 | nsp->icachep->tagp[idx] = val; | |
2819 | RW_unlock(&nsp->icachep->rwlock); | |
2820 | goto complete; | |
2821 | } | |
2822 | ||
2823 | case N2_ASI_INTR_RECEIVE: /* 0x72 RW 0 Y Interrupt Receive Register */ | |
2824 | if (0LL != addr) goto data_access_exception; | |
2825 | if (is_load) { | |
2826 | /* pthread_mutex_lock(&nsp->irq_lock); */ | |
2827 | val = nsp->irq_vector; | |
2828 | /* pthread_mutex_unlock(&nsp->irq_lock); */ | |
2829 | goto load_complete; | |
2830 | } else { | |
2831 | pthread_mutex_lock(&nsp->irq_lock); | |
2832 | nsp->irq_vector &= val; | |
2833 | pthread_mutex_unlock(&nsp->irq_lock); | |
2834 | /* TODO: need to check interrupts? (bits cleared) */ | |
2835 | ss_check_interrupts(sp); | |
2836 | } | |
2837 | break; | |
2838 | ||
2839 | case N2_ASI_INTR_W: /* 0x73 W 0 Y Interrupt Vector Dispatch Register */ | |
2840 | if (0LL != addr || is_load) goto data_access_exception; | |
2841 | DBGMONDO( lprintf(sp->gid, "ASI_INTR_W store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); | |
2842 | niagara2_send_xirq(sp, npp, val); | |
2843 | break; | |
2844 | ||
2845 | case N2_ASI_INTR_R: /* 0x74 R 0 Y Incoming Vector Register */ | |
2846 | if (0LL != addr || !is_load) goto data_access_exception; | |
2847 | pthread_mutex_lock(&nsp->irq_lock); | |
2848 | { | |
2849 | uint64_t vec; | |
2850 | uint8_t bit = 0; | |
2851 | ||
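| /* binary search for the highest-numbered pending bit in irq_vector; clear that bit and return its index as the incoming vector */ | |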
2852 | vec = nsp->irq_vector; | |
2853 | if (vec == 0) { | |
2854 | val = 0; | |
2855 | pthread_mutex_unlock(&nsp->irq_lock); | |
2856 | goto load_complete; | |
2857 | } | |
2858 | if (vec & 0xffffffff00000000ull) { | |
2859 | bit += 32; vec >>= 32; | |
2860 | } | |
2861 | if (vec & 0xffff0000) { | |
2862 | bit += 16; vec >>= 16; | |
2863 | } | |
2864 | if (vec & 0xff00) { | |
2865 | bit += 8; vec >>= 8; | |
2866 | } | |
2867 | if (vec & 0xf0) { | |
2868 | bit += 4; vec >>= 4; | |
2869 | } | |
2870 | if (vec & 0xc) { | |
2871 | bit += 2; vec >>= 2; | |
2872 | } | |
2873 | if (vec & 0x2) { | |
2874 | bit += 1; | |
2875 | } | |
2876 | nsp->irq_vector &= ~((uint64_t)1<<bit); | |
2877 | ||
2878 | val = bit; | |
2879 | } | |
2880 | pthread_mutex_unlock(&nsp->irq_lock); | |
2881 | goto load_complete; | |
2882 | ||
2883 | default: | |
2884 | data_access_exception: | |
2885 | ||
2886 | #if ERROR_TRAP_GEN /* { */ | |
2887 | /* | |
2888 | * Check for error trap generation ESR related accesses. | |
2889 | * Returns true if a valid ASI/VA access was made | |
2890 | */ | |
2891 | if (ss_error_asi_access(sp, op, regnum, asi, is_load, addr, val)) | |
2892 | goto complete; | |
2893 | ||
2894 | #else /* } if not ERROR_TRAP_GEN { */ | |
2895 | /* | |
2896 | * Even during normal execution, the hypervisor does some | |
2897 | * amount of error register initialization which shouldn't | |
2898 | * cause Legion to crash. | |
2899 | */ | |
2900 | if (ss_error_asi_noop_access(sp, op, regnum, asi, is_load, addr)) | |
2901 | goto complete; | |
2902 | #endif /* } ERROR_TRAP_GEN */ | |
2903 | ||
2904 | tt = (sparcv9_trap_type_t)SS_trap_DAE_invalid_ASI; | |
2905 | ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]); | |
2906 | MEMORY_ACCESS_TRAP(); | |
2907 | v9p->post_precise_trap(sp, tt); | |
2908 | return; | |
2909 | } | |
2910 | ||
2911 | complete:; | |
2912 | NEXT_INSTN(sp); | |
2913 | return; | |
2914 | ||
2915 | ||
2916 | load_complete: | |
2917 | ||
2918 | #if ERROR_TRAP_GEN /* { */ | |
2919 | { | |
2920 | /* | |
2921 | * When Error Trap generation is turned on, we need to | |
2922 | * check all ASI load operations against the list of | |
2923 | * ASI/VA/value pairs provided by the user. | |
2924 | * | |
2925 | * This allows very precise control over which code paths | |
2926 | * are tested in the simulated SW. | |
2927 | */ | |
2928 | ss_check_user_asi_list(sp, asi, addr, &val, true, true); | |
2929 | } | |
2930 | #endif /* } ERROR_TRAP_GEN */ | |
2931 | ||
2932 | if (op == MA_Ld ) { | |
2933 | if (regnum != Reg_sparcv9_g0) sp->intreg[regnum] = val; | |
2934 | } else { /* op == MA_LdFloat */ | |
2935 | ASSERT(MA_LdFloat == op); | |
2936 | switch(size) { | |
2937 | case MA_Size32: | |
2938 | sp->fpreg.s32[regnum] = val; | |
2939 | break; | |
2940 | case MA_Size64: | |
2941 | sp->fpreg.s64[regnum >> 1] = val; | |
2942 | break; | |
2943 | default: | |
2944 | goto unimplemented; | |
2945 | } | |
2946 | } | |
2947 | goto complete; | |
2948 | ||
2949 | unimplemented: | |
2950 | IMPL_WARNING(("ASI access (0x%02x) (@pc=0x%llx) to address 0x%llx currently unimplemented", asi, sp->pc, addr)); | |
2951 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); | |
2952 | return; | |
2953 | ||
2954 | } | |
2955 | ||
2956 | /* | |
2957 | * When ERROR_TRAP_GEN is turned off, CPU error handling related ASI | |
2958 | * accesses are treated as no-ops. Even during normal execution, the hypervisor | |
2959 | * might do some amount of error register initialization which shouldn't cause | |
2960 | * Legion to crash. | |
2961 | */ | |
2962 | ||
2963 | static bool_t ss_error_asi_noop_access(simcpu_t * sp, maccess_t op, uint_t regnum, uint_t asi, bool_t is_load, tvaddr_t addr) | |
2964 | { | |
2965 | switch (asi) { | |
2966 | ||
2967 | case N2_ASI_DESR: | |
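| /* the read-only status registers (0x0, 0x8, 0x20, 0x28) are no-op'd for loads only; the enable registers (0x10, 0x18) are no-op'd for both loads and stores */ | |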
2968 | switch (addr) { | |
2969 | case 0x0: | |
2970 | case 0x8: | |
2971 | case 0x20: | |
2972 | case 0x28: | |
2973 | if (!is_load) | |
2974 | break; | |
2975 | /*FALLTHROUGH*/ | |
2976 | ||
2977 | case 0x10: | |
2978 | case 0x18: | |
2979 | goto ss_asi_noop; | |
2980 | default: | |
2981 | break; | |
2982 | } | |
2983 | default: | |
2984 | break; | |
2985 | } | |
2986 | ||
2987 | /* | |
2988 | * Match not found. | |
2989 | */ | |
2990 | return false; | |
2991 | ||
2992 | /* | |
2993 | * Match found. Treat ASI access as noop. | |
2994 | */ | |
2995 | ss_asi_noop: | |
2996 | ||
2997 | DBGERR( lprintf(sp->gid, "CPU Error handling related ASI 0x%x VA 0x%llx access treated as noop.\n", asi, addr); ); | |
2998 | if (is_load) { | |
2999 | ASSERT(MA_Ld == op); | |
3000 | if (regnum != Reg_sparcv9_g0) sp->intreg[regnum] = 0; | |
3001 | } | |
3002 | ||
3003 | return true; | |
3004 | } | |
3005 | ||
3006 | ||
3007 | /* | |
3008 | * Slow generic memory access .. | |
3009 | * .. this becomes the path for all the accesses we can't handle via the load/store hash | |
3010 | */ | |
3011 | ||
3012 | ||
3013 | ||
3014 | void | |
3015 | ss_memory_asi_access(simcpu_t * sp, maccess_t memop, uint64_t * regp, | |
3016 | mem_flags_t mflags, uint_t asi, uint_t context_type, | |
3017 | uint_t align_mask, tvaddr_t va, tvaddr_t reg2) | |
3018 | { | |
3019 | sparcv9_cpu_t * v9p; | |
3020 | ss_strand_t * nsp; | |
3021 | ss_proc_t * npp; | |
3022 | l2c_t * l2p; | |
3023 | error_conf_t * ep; | |
3024 | error_t * errorp; | |
3025 | tpaddr_t pa; | |
3026 | tpaddr_t pa_tag; | |
3027 | tvaddr_t tag, perm_cache; | |
3028 | uint8_t * bufp; | |
3029 | uint8_t * ptr; | |
3030 | config_addr_t * cap; | |
3031 | tpaddr_t extent; | |
3032 | uint_t flags; | |
3033 | uint_t size; | |
3034 | uint_t op; | |
3035 | dev_access_t da; | |
3036 | uint_t i; | |
3037 | ||
3038 | v9p = (sparcv9_cpu_t *)(sp->specificp); | |
3039 | nsp = v9p->impl_specificp; | |
3040 | npp = (ss_proc_t *)(sp->config_procp->procp); | |
3041 | ||
3042 | mflags ^= (asi & SS_ASI_LE_MASK) ? MF_Little_Endian : 0; | |
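| /* | |
| * Endianness is tracked as a parity: the LE ASI bit here, the TLB entry's | |
| * IE bit further down, and the host byte order near the actual access each | |
| * XOR MF_Little_Endian into mflags, so an even number of inversions | |
| * cancels out. | |
| */ | |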
3043 | ||
3044 | /* OK, derive access address etc. */ | |
3045 | ||
3046 | size = memop & MA_Size_Mask; | |
3047 | op = memop & MA_Op_Mask; | |
3048 | switch (asi) { | |
3049 | case SS_ASI_PST8_P: | |
3050 | case SS_ASI_PST8_S: | |
3051 | case SS_ASI_PST16_P: | |
3052 | case SS_ASI_PST16_S: | |
3053 | case SS_ASI_PST32_P: | |
3054 | case SS_ASI_PST32_S: | |
3055 | case SS_ASI_PST8_PL: | |
3056 | case SS_ASI_PST8_SL: | |
3057 | case SS_ASI_PST16_PL: | |
3058 | case SS_ASI_PST16_SL: | |
3059 | case SS_ASI_PST32_PL: | |
3060 | case SS_ASI_PST32_SL: | |
3061 | break; | |
3062 | default: | |
3063 | if (MA_CAS != op) { | |
3064 | va += reg2; | |
3065 | } | |
3066 | break; | |
3067 | } | |
3068 | ||
3069 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: LE load/store pc=0x%llx instr=0x%x count=%d asi=0x%x\n", sp->pc, op, (1 << size), asi); ); | |
3070 | ||
3071 | /* | |
3072 | * OK - Step 1: decide whether or not to do a TLB translation. | |
3073 | * The assumption here is that privilege checks have already happened. | |
3074 | */ | |
3075 | ||
3076 | #if ERROR_INJECTION | |
3077 | errorp = sp->errorp; | |
3078 | #endif | |
3079 | /* quick check of alignment */ | |
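| /* | |
| * e.g. for a 64-bit access the caller is expected to pass align_mask == 0x7, | |
| * so any va that is not 8-byte aligned traps; the special case below picks | |
| * the LDDF/STDF variant of the trap when a 64-bit FP access is only 4-byte | |
| * aligned. | |
| */ | |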
3080 | if ((va & (tvaddr_t)align_mask) != 0) { | |
3081 | sparcv9_trap_type_t tt; | |
3082 | ||
3083 | if (v9p->pstate.addr_mask) | |
3084 | va &= MASK64(31,0); /* SV9_ID125 FIXME */ | |
3085 | ||
3086 | DBGALIGN( lprintf(sp->gid,"Miss data access pc=0x%llx va=0x%llx align_mask=0x%llx\n", sp->pc, va, (tvaddr_t)align_mask); ); | |
3087 | /* alignment error force a trap */ | |
3088 | VA48_WARNING(sp, va); | |
3089 | SET_DTLB_FAULT( nsp, VA48(va) ); | |
3090 | MEMORY_ACCESS_TRAP(); | |
3091 | ||
3092 | if ((MA_ldfp64 == memop || MA_stfp64 == memop) && | |
3093 | ((va & 0x7) == 0x4)) | |
3094 | tt = ((memop == MA_ldfp64) ? | |
3095 | Sparcv9_trap_LDDF_mem_address_not_aligned : | |
3096 | Sparcv9_trap_STDF_mem_address_not_aligned); | |
3097 | else | |
3098 | tt = Sparcv9_trap_mem_address_not_aligned; | |
3099 | ||
3100 | v9p->post_precise_trap(sp, tt); | |
3101 | return; | |
3102 | } | |
3103 | ||
3104 | /* Find the pa corresponding to the line we need */ | |
3105 | tag = va & XDCACHE_TAG_MASK; | |
3106 | ||
3107 | /* | |
3108 | * We have to get the PA from the EA ... this depends on the mode | |
3109 | * and the type of access. | |
3110 | */ | |
3111 | ||
3112 | pa_tag = tag; | |
3113 | if (v9p->pstate.addr_mask) { | |
3114 | pa_tag &= MASK64(31,0); | |
3115 | va &= MASK64(31,0); | |
3116 | /* NOTE: we don't mask tag ... we allow it to match the full 64bit address */ | |
3117 | } | |
3118 | ||
3119 | pa = va; | |
3120 | flags = SS_TLB_FLAG_READ | SS_TLB_FLAG_WRITE; /* default access flags */ | |
3121 | ||
3122 | ||
3123 | ||
3124 | /* | |
3125 | * OK perform the TLB access based on the context | |
3126 | * and partition id selected | |
3127 | */ | |
3128 | ||
3129 | /* default read and write permission for MMU bypass */ | |
3130 | perm_cache = XDCACHE_READ_PERM | XDCACHE_WRITE_PERM; | |
3131 | ||
3132 | if (!(mflags & MF_MMU_Bypass)) { | |
3133 | ss_tlb_t * tlbp; | |
3134 | tlb_entry_t *tep, *tmp_tep; | |
3135 | tlb_entry_t te_copy; | |
3136 | uint_t idx, partid; | |
3137 | ss_trap_type_t miss_trap_type; | |
3138 | uint_t context, miss_context; | |
3139 | bool_t search_tlb_again; | |
3140 | ||
3141 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: performing TLB access \n"); ); | |
3142 | ||
3143 | /* If not priv mode and mmu is off, translate real addresses */ | |
3144 | search_tlb_again = false; | |
3145 | if (!nsp->dmmu.enabled) | |
3146 | context = SS_TLB_REAL_CONTEXT; | |
3147 | else { | |
3148 | ||
3149 | /* figure out the context value */ | |
3150 | switch (context_type) { | |
3151 | case ss_ctx_primary: | |
3152 | ASSERT((mflags & MF_TLB_Real_Ctx) == 0); | |
3153 | miss_context = context = nsp->pri_context; | |
3154 | if (nsp->pri_context != nsp->pri_context1) | |
3155 | search_tlb_again = true; | |
3156 | break; | |
3157 | case ss_ctx_secondary: | |
3158 | ASSERT((mflags & MF_TLB_Real_Ctx) == 0); | |
3159 | miss_context = context = nsp->sec_context; | |
3160 | if (nsp->sec_context != nsp->sec_context1) | |
3161 | search_tlb_again = true; | |
3162 | break; | |
3163 | case ss_ctx_nucleus: | |
3164 | if (mflags & MF_TLB_Real_Ctx) | |
3165 | context = SS_TLB_REAL_CONTEXT; | |
3166 | else | |
3167 | context = SS_NUCLEUS_CONTEXT; | |
3168 | miss_context = 0; | |
3169 | break; | |
3170 | default: | |
3171 | fatal("ss_memory_asi_access: Internal Error. Not expecting " | |
3172 | "context type 0x%x\n", context_type); | |
3173 | } | |
3174 | ||
3175 | } | |
3176 | /* | |
3177 | * check for an out of range address (i.e. one that lies within the "VA hole" | |
3178 | * or "RA hole") | |
3179 | */ | |
3180 | if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) { | |
3181 | ss_trap_type_t tt; | |
3182 | /* | |
3183 | * setup the right trap type | |
3184 | * (see N2 PRM, Table 13-15 and Table 13-16) | |
3185 | */ | |
3186 | if (context == SS_TLB_REAL_CONTEXT) | |
3187 | tt = N2_trap_mem_real_range; | |
3188 | else | |
3189 | tt = N2_trap_mem_address_range; | |
3190 | ||
3191 | SET_DTLB_FAULT( nsp, VA48(va) ); | |
3192 | ||
3193 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)tt); | |
3194 | return; | |
3195 | } | |
3196 | ||
3197 | partid = nsp->partid; | |
3198 | ||
3199 | tlbp = nsp->dtlbp; | |
3200 | RW_rdlock(&tlbp->rwlock); | |
3201 | ||
3202 | n2_dtlb_search:; | |
3203 | /* FIXME: Need a better hash than this ! */ | |
3204 | idx = va >> SS_MAX_PAGE_SIZE_BITS; | |
3205 | idx += context + partid; | |
3206 | idx &= SS_TLB_HASH_MASK; | |
3207 | /* | |
3208 | * So we search for a matching page using the info we have in the | |
3209 | * hash - while another thread might possibly be removing or | |
3210 | * inserting an entry into the same table. | |
3211 | */ | |
3212 | ||
3213 | ||
3214 | for ( tep = tlbp->hash[idx].ptr; tep!=(tlb_entry_t*)0; tep = tep->nextp ) { | |
3215 | /* try and match the entry as appropriate */ | |
3216 | if (((tep->tag_pfn ^ va)>>tep->match_shift)==0 && tep->match_context==context && tep->partid == partid) { | |
3217 | goto dtlb_match; | |
3218 | } | |
3219 | } | |
3220 | ||
3221 | /* | |
3222 | * Might need to search the TLB one more time based | |
3223 | * on the shared context value. | |
3224 | */ | |
3225 | if (search_tlb_again) { | |
3226 | search_tlb_again = false; | |
3227 | if (context_type == ss_ctx_primary) | |
3228 | context = nsp->pri_context1; | |
3229 | else | |
3230 | context = nsp->sec_context1; | |
3231 | goto n2_dtlb_search; | |
3232 | } | |
3233 | ||
3234 | RW_unlock(&tlbp->rwlock); | |
3235 | ||
3236 | DBGMISS( lprintf(sp->gid, "dtlb miss: pc=%lx asi=%x va=%lx ctx=%x\n", sp->pc, asi, va, miss_context); ); | |
3237 | ||
3238 | /* | |
3239 | * If the MMU is "disabled" in privileged mode ... this is a real miss, not a | |
3240 | * virtual translation miss, so the fault context and trap type are different | |
3241 | */ | |
3242 | if ((nsp->dmmu.enabled) && (!(mflags & MF_TLB_Real_Ctx))) { | |
3243 | uint64_t pa_offset; | |
3244 | miss_trap_type = ss_hardware_tablewalk(sp, &(nsp->dmmu), tlbp, va, context_type, &flags, &pa_offset); | |
3245 | if (miss_trap_type == SS_trap_NONE) { | |
3246 | pa += pa_offset; | |
3247 | pa_tag += pa_offset; | |
3248 | goto dtlb_priv_test; | |
3249 | } | |
3250 | } else { | |
3251 | miss_context = 0; /* null for ra->pa miss undefined ? */ | |
3252 | miss_trap_type = SS_trap_data_real_translation_miss; | |
3253 | } | |
3254 | nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* Do this properly later - FIXME */ | |
3255 | dtlb_trap:; | |
3256 | VA48_WARNING(sp, va); | |
3257 | SET_DTLB_FAULT( nsp, va ); | |
3258 | DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); ); | |
3259 | MEMORY_ACCESS_TRAP(); | |
3260 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type); | |
3261 | ||
3262 | return; | |
3263 | ||
3264 | dtlb_match:; | |
3265 | ||
3266 | /* | |
3267 | * try to match the entry again to detect a multi-hit | |
3268 | */ | |
3269 | for (tmp_tep = tep->nextp; tmp_tep != (tlb_entry_t*)0; tmp_tep = tmp_tep->nextp) { | |
3270 | if (((tmp_tep->tag_pfn ^ va) >> tmp_tep->match_shift) == 0 | |
3271 | && tmp_tep->match_context == context && tmp_tep->partid == partid) { | |
3272 | ||
3273 | RW_unlock(&tlbp->rwlock); | |
3274 | ||
3275 | DBGMMU( lprintf(sp->gid, "dtlb miss multi-hit: pc=%lx va=%lx ctx=%x\n", | |
3276 | sp->pc, va, context); ); | |
3277 | DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n", | |
3278 | tep->tag_context, tep->match_shift, tep->tag_pfn, | |
3279 | tep->tag_pfn + tep->pa_offset); ); | |
3280 | DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n", | |
3281 | tmp_tep->tag_context, tmp_tep->match_shift, | |
3282 | tmp_tep->tag_pfn, tmp_tep->tag_pfn + tmp_tep->pa_offset); ); | |
3283 | ||
3284 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_data_access_MMU_error); | |
3285 | return; | |
3286 | } | |
3287 | } | |
3288 | ||
3289 | /* we have a matching entry ... now all we have to worry about are the permissions */ | |
3290 | flags = tep->flags; | |
3291 | pa += tep->pa_offset; | |
3292 | pa_tag += tep->pa_offset; | |
3293 | ||
3294 | RW_unlock(&tlbp->rwlock); | |
3295 | ||
3296 | dtlb_priv_test:; | |
3297 | ||
3298 | #if ERROR_INJECTION | |
3299 | /* | |
3300 | * Errors on dtlb hit: stash table_entry pointer and if | |
3301 | * subsequent itlb hit on same entry post error again. | |
3302 | */ | |
3303 | if (dtlb_hit_error_match(sp, op, tep, va)) | |
3304 | return; | |
3305 | #endif | |
3306 | ||
3307 | ||
3308 | /* privilege test apparently takes priority ... p.51 US-I PRM table 6-4 */ | |
3309 | if ((flags & SS_TLB_FLAG_PRIV) && !(mflags & MF_Has_Priv)) { | |
3310 | nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | (miss_context); /* Do this properly later - FIXME */ | |
3311 | miss_trap_type = SS_trap_DAE_privilege_violation; | |
3312 | goto dtlb_trap; | |
3313 | } | |
3314 | ||
3315 | /* | |
3316 | * validate bits NFO, E and CP | |
3317 | */ | |
3318 | if (!(flags & SS_TLB_FLAG_CP) && (mflags & MF_Atomic_Access)) { | |
3319 | nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; | |
3320 | miss_trap_type = SS_trap_DAE_nc_page; | |
3321 | goto dtlb_trap; | |
3322 | } | |
3323 | if ((flags & SS_TLB_FLAG_E) && (mflags & MF_No_Fault)) { | |
3324 | nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; | |
3325 | miss_trap_type = SS_trap_DAE_so_page; | |
3326 | goto dtlb_trap; | |
3327 | } | |
3328 | if ((flags & SS_TLB_FLAG_NFO) && (!(mflags & MF_No_Fault))) { | |
3329 | nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; | |
3330 | miss_trap_type = SS_trap_DAE_NFO_page; | |
3331 | goto dtlb_trap; | |
3332 | } | |
3333 | ||
3334 | if (IS_V9_MA_STORE(op) && !(flags & SS_TLB_FLAG_WRITE)) { | |
3335 | uint64_t ps1, tte_ps1; | |
3336 | nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | (miss_context); /* Do this properly later - FIXME */ | |
3337 | miss_trap_type = SS_trap_fast_data_access_protection; | |
3338 | goto dtlb_trap; | |
3339 | } | |
3340 | ||
3341 | mflags ^= (flags & SS_TLB_FLAG_IE) ? MF_Little_Endian : 0; | |
3342 | ||
3343 | perm_cache = (flags & SS_TLB_FLAG_WRITE) ? XDCACHE_WRITE_PERM : 0; | |
3344 | perm_cache |= (flags & SS_TLB_FLAG_READ) ? XDCACHE_READ_PERM : 0; | |
3345 | } else { | |
3346 | /* Niagara 2 only implements 40 bits of PA; the tlb code | |
3347 | masks the PA, so here we need to mask bypass PAs too */ | |
3348 | pa &= MASK64(39,0); | |
3349 | } | |
3350 | ||
3351 | /* | |
3352 | * OK - now go get the pointer to the line data | |
3353 | * ... start by finding the device that has the | |
3354 | * memory we need. | |
3355 | * optimisation: guess at the last device found first. | |
3356 | */ | |
3357 | ||
3358 | /* now find the device - looking in the cache first */ | |
3359 | ||
3360 | cap = sp->xdc.miss_addrp; | |
3361 | if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) { | |
3362 | domain_t * domainp; | |
3363 | config_proc_t * config_procp; | |
3364 | ||
3365 | config_procp = sp->config_procp; | |
3366 | domainp = config_procp->domainp; | |
3367 | ||
3368 | cap = find_domain_address(domainp, pa); | |
3369 | if (cap == NULL) { | |
3370 | /* OK, it's a bus error - there was no backing store */ | |
3371 | ||
3372 | EXEC_WARNING(("bus error - (@pc=0x%llx, icount=%llu) " | |
3373 | "access to va=0x%llx (pid=0x%x,ctx_type=0x%x,cacheline " | |
3374 | "va=0x%llx -> physical 0x%llx)", sp->pc, ICOUNT(sp), | |
3375 | va, nsp->partid, context_type, tag, pa_tag)); | |
3376 | goto data_access_error; | |
3377 | } | |
3378 | } | |
3379 | ||
3380 | /* try and get the buffer pointer */ | |
3381 | ||
3382 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: calling dev_cacheable\n"); ); | |
3383 | ||
3384 | da = 0; | |
3385 | if (IS_V9_MA_LOAD(op)) | |
3386 | da |= DA_Load; | |
3387 | if (IS_V9_MA_STORE(op)) | |
3388 | da |= DA_Store; | |
3389 | ||
3390 | extent = cap->config_devp->dev_typep->dev_cacheable(cap, da, | |
3391 | pa_tag-cap->baseaddr, &bufp); | |
3392 | ||
3393 | if (extent < XDCACHE_LINE_SIZE) { | |
3394 | bool_t status; | |
3395 | uint_t pio_op; | |
3396 | uint64_t tempreg, *aregp; | |
3397 | ||
3398 | /* Let device handle memory access operation */ | |
3399 | /* bus error again ? or fill from multiple devices ? */ | |
3400 | /* need to check validity for device here ... FIXME */ | |
3401 | ||
3402 | pio_op = memop & MA_Op_Mask; | |
3403 | ||
3404 | if ((MF_Little_Endian & mflags) && (pio_op == MA_St)) { | |
3405 | tempreg = sparcv9_invert_endianess(regp, (1 << size)); | |
3406 | aregp = &tempreg; | |
3407 | } else if ((&(sp->intreg[Reg_sparcv9_g0]) == regp) && | |
3408 | ((pio_op == MA_Ld) || (pio_op == MA_LdSigned))) { | |
3409 | aregp = &tempreg; | |
3410 | } else { | |
3411 | aregp = regp; | |
3412 | } | |
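| /* | |
| * sparcv9_invert_endianess() is used throughout this path; it is assumed | |
| * here to byte-reverse the low (1 << size) bytes of its argument.  A sketch | |
| * of that operation (illustrative only, not the actual implementation): | |
| * | |
| *	uint64_t swapped = 0, v = *regp; | |
| *	uint_t i; | |
| *	for (i = 0; i < (1u << size); i++) { | |
| *		swapped = (swapped << 8) | (v & 0xff); | |
| *		v >>= 8; | |
| *	} | |
| */ | |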
3413 | ||
3414 | status = cap->config_devp->dev_typep->dev_cpu_access(sp, cap, pa-cap->baseaddr, memop, aregp); | |
3415 | ||
3416 | if ((MF_Little_Endian & mflags) && status && (pio_op == MA_Ld || pio_op == MA_LdSigned)) { | |
3417 | *regp = sparcv9_invert_endianess(regp, (1 << size)); | |
3418 | if (pio_op == MA_LdSigned) { | |
3419 | uint32_t shift; | |
3420 | ||
3421 | shift = 64 - (8 << size); | |
3422 | *regp = ((sint64_t)(*regp << shift)) >> shift; | |
3423 | } | |
3424 | } | |
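| /* | |
| * The shift pair above sign-extends the (8 << size)-bit result into 64 | |
| * bits: e.g. for a 16-bit signed load, shift = 64 - 16 = 48, so a loaded | |
| * value of 0xffff becomes 0xffffffffffffffff after the left shift and | |
| * arithmetic right shift. | |
| */ | |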
3425 | ||
3426 | ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]); | |
3427 | ||
3428 | if (status) | |
3429 | goto done; | |
3430 | ||
3431 | EXEC_WARNING(("data access error - (@pc=0x%llx) access to va=0x%llx " | |
3432 | "(pid=0x%x,ctx_type=0x%x,physical 0x%llx)", sp->pc, va, | |
3433 | nsp->partid, context_type, pa)); | |
3434 | ||
3435 | data_access_error:; | |
3436 | ||
3437 | MEMORY_ACCESS_TRAP(); | |
3438 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: post_precise_trap \n"); ); | |
3439 | ||
3440 | v9p->post_precise_trap(sp, Sparcv9_trap_data_access_error); /* FIXME: right trap ? */ | |
3441 | return; | |
3442 | } | |
3443 | ||
3444 | #if ERROR_INJECTION | |
3445 | /* | |
3446 | * processor-wide checks for unhandled L2 and DRAM errors | |
3447 | */ | |
3448 | if (l2dram_access_error_match(sp, op, pa)) | |
3449 | return; | |
3450 | #endif | |
3451 | ||
3452 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: handling cacheable device memory\n"); ); | |
3453 | ||
3454 | /* | |
3455 | * Now handle cacheable device memory | |
3456 | * | |
3457 | * Because we implicitly assume that the xdc uses the current context, | |
3458 | * we only add missed entries to the xdc if it was a normal memory op. | |
3459 | */ | |
3460 | ||
3461 | if ((mflags & (MF_Normal|MF_Little_Endian)) == MF_Normal) { | |
3462 | long ridx; | |
3463 | xdcache_line_t * xclp; | |
3464 | ||
3465 | sp->xdc.miss_addrp = cap; /* cache for next time */ | |
3466 | ||
3467 | ridx = (va >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; | |
3468 | xclp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + ridx); | |
3469 | /* only cache if memory is cacheable */ | |
3470 | /* fill in the line */ | |
3471 | /* WARNING: This tag may be a full 64bit value even if pstate.am=1 */ | |
3472 | /* do not use ea_offset with anything other than tag */ | |
3473 | xclp->tag = tag | perm_cache | sp->tagstate; | |
3474 | xclp->offset = ((uint64_t)bufp) - tag; | |
3475 | } | |
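| /* | |
| * Storing (bufp - tag) as the line offset means a later hit on this line | |
| * can turn any va within it back into a host pointer with just | |
| * (va + xclp->offset); the tag also carries the permission bits and | |
| * sp->tagstate, presumably so that a mismatch forces this slow path again. | |
| */ | |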
3476 | ||
3477 | /* | |
3478 | * Sigh ... now complete the load/store on behalf of the original | |
3479 | * instruction. | |
3480 | */ | |
3481 | ||
3482 | #if HOST_CPU_LITTLE_ENDIAN | |
3483 | /* temporary hack */ | |
3484 | mflags ^= MF_Little_Endian; | |
3485 | #endif | |
3486 | ||
3487 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: completing load/store on behalf of original instr.\n"); ); | |
3488 | ||
3489 | ptr = (uint8_t*)(bufp + (pa & XDCACHE_LINE_OFFSET_MASK) ); | |
3490 | ||
3491 | switch (op) { | |
3492 | uint64_t val, cval; | |
3493 | ||
3494 | case MA_Ld: | |
3495 | switch (size) { | |
3496 | case MA_Size8: | |
3497 | val = *(uint8_t*)ptr; | |
3498 | break; | |
3499 | case MA_Size16: | |
3500 | val = *(uint16_t*)ptr; | |
3501 | break; | |
3502 | case MA_Size32: | |
3503 | val = *(uint32_t*)ptr; | |
3504 | break; | |
3505 | case MA_Size64: | |
3506 | val = *(uint64_t*)ptr; | |
3507 | break; | |
3508 | default: | |
3509 | abort(); | |
3510 | } | |
3511 | if (MF_Little_Endian & mflags) { | |
3512 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_Ld with LE - val=0x%llx count=0x%x\n", | |
3513 | val, (1 << size)); ); | |
3514 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3515 | } | |
3516 | goto complete_load; | |
3517 | ||
3518 | case MA_LdSigned: | |
3519 | switch (size) { | |
3520 | case MA_Size8: | |
3521 | val = *(sint8_t*)ptr; | |
3522 | break; | |
3523 | case MA_Size16: | |
3524 | val = *(sint16_t*)ptr; | |
3525 | break; | |
3526 | case MA_Size32: | |
3527 | val = *(sint32_t*)ptr; | |
3528 | break; | |
3529 | default: | |
3530 | abort(); | |
3531 | } | |
3532 | if (MF_Little_Endian & mflags) { | |
3533 | uint32_t shift; | |
3534 | ||
3535 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_LdSigned with LE - val=0x%llx count=0x%x\n", | |
3536 | val, (1 << size)); ); | |
3537 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3538 | shift = 64 - (8 << size); | |
3539 | val = ((sint64_t)(val << shift)) >> shift; | |
3540 | } | |
3541 | ||
3542 | goto complete_load; | |
3543 | ||
3544 | case MA_St: | |
3545 | if (MF_Little_Endian & mflags) { | |
3546 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_St with LE - val=0x%llx\n", *regp); ); | |
3547 | val = sparcv9_invert_endianess(regp, (1 << size)); | |
3548 | } else { | |
3549 | val = *regp; | |
3550 | } | |
3551 | if (mflags & MF_Blk_Init) { | |
3552 | /* If line in L2 cache, leave data alone, otherwise zero it */ | |
3553 | /* XXX How to simulate? */ | |
3554 | ((uint64_t*)ptr)[0] = 0; | |
3555 | ((uint64_t*)ptr)[1] = 0; | |
3556 | ((uint64_t*)ptr)[2] = 0; | |
3557 | ((uint64_t*)ptr)[3] = 0; | |
3558 | ((uint64_t*)ptr)[4] = 0; | |
3559 | ((uint64_t*)ptr)[5] = 0; | |
3560 | ((uint64_t*)ptr)[6] = 0; | |
3561 | ((uint64_t*)ptr)[7] = 0; | |
3562 | } | |
3563 | switch (size) { | |
3564 | case MA_Size8: | |
3565 | *(uint8_t*)ptr = val; | |
3566 | break; | |
3567 | case MA_Size16: | |
3568 | *(uint16_t*)ptr = val; | |
3569 | break; | |
3570 | case MA_Size32: | |
3571 | *(uint32_t*)ptr = val; | |
3572 | break; | |
3573 | case MA_Size64: | |
3574 | *(uint64_t*)ptr = val; | |
3575 | break; | |
3576 | default: | |
3577 | abort(); | |
3578 | } | |
3579 | break; | |
3580 | ||
3581 | case MA_LdFloat: | |
3582 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdFloat with LE - \n"); ); | |
3583 | ASSERT(&(sp->intreg[Reg_sparcv9_g0]) != regp); | |
3584 | switch (size) { | |
3585 | case MA_Size8: | |
3586 | *regp = *(uint8_t*)ptr; | |
3587 | break; | |
3588 | case MA_Size16: | |
3589 | if (MF_Little_Endian & mflags) { | |
3590 | val = *(uint16_t*)ptr; | |
3591 | *regp = sparcv9_invert_endianess(&val, sizeof (uint16_t)); | |
3592 | } else | |
3593 | *regp = *(uint16_t*)ptr; | |
3594 | break; | |
3595 | case MA_Size32: | |
3596 | if (MF_Little_Endian & mflags) { | |
3597 | val = *(ieee_fp32_t*)ptr; | |
3598 | *(ieee_fp32_t*)regp = | |
3599 | sparcv9_invert_endianess(&val, | |
3600 | sizeof (ieee_fp32_t)); | |
3601 | } else | |
3602 | *(ieee_fp32_t*)regp = *(ieee_fp32_t*)ptr; | |
3603 | break; | |
3604 | case MA_Size64: | |
3605 | if (MF_Little_Endian & mflags) | |
3606 | *(ieee_fp64_t*)regp = | |
3607 | sparcv9_invert_endianess( | |
3608 | (uint64_t *)ptr, | |
3609 | sizeof (ieee_fp64_t)); | |
3610 | else | |
3611 | *(ieee_fp64_t*)regp = *(ieee_fp64_t*)ptr; | |
3612 | break; | |
3613 | case MA_Size512: | |
3614 | if ((MF_Little_Endian & mflags) == 0) { | |
3615 | uint_t i; | |
3616 | for (i = 0; i < 8; i++) { | |
3617 | *(ieee_fp64_t*)(regp + i) = | |
3618 | *(ieee_fp64_t*)(ptr + i*8); | |
3619 | } | |
3620 | } else { | |
3621 | uint_t i; | |
3622 | for (i = 0; i < 8; i++) { | |
3623 | *(ieee_fp64_t*)(regp + i) = | |
3624 | sparcv9_invert_endianess( | |
3625 | (uint64_t *)(ptr + i*8), | |
3626 | sizeof (ieee_fp64_t)); | |
3627 | } | |
3628 | } | |
3629 | break; | |
3630 | #ifdef PROCESSOR_SUPPORTS_QUADFP /* { */ | |
3631 | case MA_Size128: | |
3632 | ASSERT((MF_Little_Endian & mflags) == 0); | |
3633 | *(ieee_fp128_t*)regp = *(ieee_fp128_t*)ptr; | |
3634 | break; | |
3635 | #endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */ | |
3636 | default: | |
3637 | abort(); | |
3638 | } | |
3639 | goto done; | |
3640 | ||
3641 | case MA_StFloat: | |
3642 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_StFloat with LE - \n"); ); | |
3643 | ||
3644 | switch (size) { | |
3645 | case MA_Size8: | |
3646 | *(uint8_t*)ptr = (*regp) & MASK64(7,0); | |
3647 | break; | |
3648 | case MA_Size16: | |
3649 | val = (*regp) & MASK64(15,0); | |
3650 | if (MF_Little_Endian & mflags) | |
3651 | *(uint16_t*)ptr = | |
3652 | sparcv9_invert_endianess(&val, | |
3653 | sizeof (uint16_t)); | |
3654 | else | |
3655 | *(uint16_t*)ptr = val; | |
3656 | break; | |
3657 | case MA_Size32: | |
3658 | if (MF_Little_Endian & mflags) { | |
3659 | val = *(ieee_fp32_t*)regp; | |
3660 | *(ieee_fp32_t*)ptr = | |
3661 | sparcv9_invert_endianess(&val, | |
3662 | sizeof (ieee_fp32_t)); | |
3663 | } else | |
3664 | *(ieee_fp32_t*)ptr = *(ieee_fp32_t*)regp; | |
3665 | break; | |
3666 | case MA_Size64: | |
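| /* | |
| * For the partial-store ASIs handled below, reg2 supplies a lane mask: | |
| * bit i of reg2 selects whether 8-bit (PST8), 16-bit (PST16) or 32-bit | |
| * (PST32) lane i of the 64-bit store data is merged into memory, with the | |
| * unselected lanes left untouched.  For example a PST8_P store with | |
| * reg2 == 0x81 updates only byte lanes 0 and 7. | |
| */ | |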
3667 | switch (asi) { | |
3668 | case SS_ASI_PST8_PL: | |
3669 | case SS_ASI_PST8_SL: | |
3670 | val = *(ieee_fp64_t*)(ptr); | |
3671 | for (i=0; i < 8; i++) { | |
3672 | if ((reg2>>i) & 1) { | |
3673 | val &= ~MASK64(63-(i*8),(56-(i*8))); | |
3674 | val |= (sparcv9_invert_endianess(regp, | |
3675 | sizeof (ieee_fp64_t)) & MASK64(63-(i*8),(56-(i*8)))); | |
3676 | } | |
3677 | } | |
3678 | *(ieee_fp64_t*)(ptr) = (val); | |
3679 | break; | |
3680 | case SS_ASI_PST8_P: | |
3681 | case SS_ASI_PST8_S: | |
3682 | val = (*(ieee_fp64_t*)ptr); | |
3683 | for (i=0; i < 8; i++) { | |
3684 | if ((reg2>>i) & 1) { | |
3685 | val &= ~MASK64((i*8)+7,i*8); | |
3686 | val |= (*(ieee_fp64_t*)regp & MASK64((i*8)+7,i*8)); | |
3687 | } | |
3688 | } | |
3689 | *(ieee_fp64_t*)(ptr) = (val); | |
3690 | break; | |
3691 | case SS_ASI_PST16_PL: | |
3692 | case SS_ASI_PST16_SL: | |
3693 | val = *(ieee_fp64_t*)(ptr); | |
3694 | for (i=0; i < 4; i++) { | |
3695 | if ((reg2>>i) & 1) { | |
3696 | val &= ~MASK64(63-(i*16),48-(i*16)); | |
3697 | val |= (sparcv9_invert_endianess(regp, | |
3698 | sizeof (ieee_fp64_t)) & MASK64(63-(i*16),(48-(i*16)))); | |
3699 | } | |
3700 | } | |
3701 | *(ieee_fp64_t*)(ptr) = (val); | |
3702 | break; | |
3703 | case SS_ASI_PST16_P: | |
3704 | case SS_ASI_PST16_S: | |
3705 | val = (*(ieee_fp64_t*)ptr); | |
3706 | for (i=0; i < 4; i++) { | |
3707 | if ((reg2>>i) & 1) { | |
3708 | val &= ~MASK64((i*16)+15,i*16); | |
3709 | val |= (*(ieee_fp64_t*)regp & MASK64((i*16)+15,i*16)); | |
3710 | } | |
3711 | } | |
3712 | *(ieee_fp64_t*)(ptr) = (val); | |
3713 | break; | |
3714 | case SS_ASI_PST32_PL: | |
3715 | case SS_ASI_PST32_SL: | |
3716 | val = *(ieee_fp64_t*)(ptr); | |
3717 | for (i=0; i < 2; i++) { | |
3718 | if ((reg2>>i) & 1) { | |
3719 | val &= ~MASK64(63-(i*32),32-(i*32)); | |
3720 | val |= (sparcv9_invert_endianess(regp, | |
3721 | sizeof (ieee_fp64_t)) & MASK64(63-(i*32),(32-(i*32)))); | |
3722 | } | |
3723 | } | |
3724 | *(ieee_fp64_t*)(ptr) = (val); | |
3725 | break; | |
3726 | case SS_ASI_PST32_P: | |
3727 | case SS_ASI_PST32_S: | |
3728 | val = (*(ieee_fp64_t*)ptr); | |
3729 | for (i=0; i < 2; i++) { | |
3730 | if ((reg2>>i) & 1) { | |
3731 | val &= ~MASK64((i*32)+31,i*32); | |
3732 | val |= (*(ieee_fp64_t*)regp & MASK64((i*32)+31,i*32)); | |
3733 | } | |
3734 | } | |
3735 | *(ieee_fp64_t*)(ptr) = (val); | |
3736 | break; | |
3737 | default: | |
3738 | if (MF_Little_Endian & mflags) | |
3739 | *(ieee_fp64_t*)ptr = | |
3740 | sparcv9_invert_endianess(regp, | |
3741 | sizeof (ieee_fp64_t)); | |
3742 | else | |
3743 | *(ieee_fp64_t*)ptr = *(ieee_fp64_t*)regp; | |
3744 | break; | |
3745 | } | |
3746 | break; | |
3747 | case MA_Size512: | |
3748 | if ((MF_Little_Endian & mflags) == 0) { | |
3749 | uint_t i; | |
3750 | for (i = 0; i < 8; i++) { | |
3751 | *(ieee_fp64_t*)(ptr + i*8) = | |
3752 | *(ieee_fp64_t*)(regp + i); | |
3753 | } | |
3754 | } else { | |
3755 | uint_t i; | |
3756 | for (i = 0; i < 8; i++) { | |
3757 | *(ieee_fp64_t*)(ptr + i*8) = | |
3758 | sparcv9_invert_endianess( | |
3759 | (regp + i), | |
3760 | sizeof (ieee_fp64_t)); | |
3761 | } | |
3762 | } | |
3763 | break; | |
3764 | #ifdef PROCESSOR_SUPPORTS_QUADFP /* { */ | |
3765 | case MA_Size128: | |
3766 | ASSERT((MF_Little_Endian & mflags) == 0); | |
3767 | *(ieee_fp128_t*)ptr = *(ieee_fp128_t*)regp; | |
3768 | break; | |
3769 | #endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */ | |
3770 | default: | |
3771 | abort(); | |
3772 | } | |
3773 | goto done; | |
3774 | ||
3775 | case MA_LdSt: | |
3776 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdSt with LE - \n"); ); | |
3777 | switch (size) { | |
3778 | case MA_Size8: | |
3779 | val = host_ldstub(ptr, reg2, *regp); | |
3780 | break; | |
3781 | default: | |
3782 | abort(); | |
3783 | } | |
3784 | goto complete_load; | |
3785 | ||
3786 | case MA_Swap: | |
3787 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_Swap with LE - \n"); ); | |
3788 | if (MF_Little_Endian & mflags) { | |
3789 | val = sparcv9_invert_endianess(regp, (1 << size)); | |
3790 | } else { | |
3791 | val = *regp; | |
3792 | } | |
3793 | switch (size) { | |
3794 | case MA_Size32: | |
3795 | val = host_swap((uint32_t *)ptr, val); | |
3796 | break; | |
3797 | default: | |
3798 | abort(); | |
3799 | } | |
3800 | if (MF_Little_Endian & mflags) { | |
3801 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3802 | } | |
3803 | goto complete_load; | |
3804 | ||
3805 | case MA_CAS: | |
3806 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_CAS with LE - \n"); ); | |
3807 | if (MF_Little_Endian & mflags) { | |
3808 | val = sparcv9_invert_endianess(regp, (1 << size)); | |
3809 | cval = sparcv9_invert_endianess(®2, (1 << size)); | |
3810 | } else { | |
3811 | val = *regp; | |
3812 | cval = reg2; | |
3813 | } | |
3814 | switch (size) { | |
3815 | case MA_Size32: | |
3816 | val = host_cas32((uint32_t *)ptr, cval, val); | |
3817 | break; | |
3818 | case MA_Size64: | |
3819 | val = host_cas64((uint64_t *)ptr, cval, val); | |
3820 | break; | |
3821 | default: | |
3822 | abort(); | |
3823 | } | |
3824 | if (MF_Little_Endian & mflags) { | |
3825 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3826 | } | |
3827 | goto complete_load; | |
3828 | ||
3829 | complete_load: | |
3830 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) | |
3831 | *regp = val; | |
3832 | break; | |
3833 | ||
3834 | case MA_LdDouble: | |
3835 | switch (size) { | |
3836 | case MA_Size64: /* standard sparc LDD instruction */ | |
3837 | val = *(uint64_t *)ptr; | |
3838 | regp[0] = (uint32_t)(val >> 32); | |
3839 | regp[1] = (uint32_t)val; | |
3840 | if (MF_Little_Endian & mflags) { | |
3841 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_ldDouble with LE - val=0x%llx count=0x%x\n", | |
3842 | val, (1 << size)); ); | |
3843 | regp[0] = sparcv9_invert_endianess(®p[0], (1 << size)>>1); | |
3844 | regp[1] = sparcv9_invert_endianess(®p[1], (1 << size)>>1); | |
3845 | } | |
3846 | sp->intreg[Reg_sparcv9_g0] = 0; /* regp might be %g0 */ | |
3847 | break; | |
3848 | case MA_Size128: | |
3849 | host_atomic_get128be((uint64_t *)ptr, regp, ®p[1]); | |
3850 | if (MF_Little_Endian & mflags) { | |
3851 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_ldDouble with LE - val=0x%llx,0x%llx count=0x%x\n", | |
3852 | regp[0], regp[1], (1 << size)); ); | |
3853 | regp[0] = sparcv9_invert_endianess(®p[0], (1 << size)>>1); | |
3854 | regp[1] = sparcv9_invert_endianess(®p[1], (1 << size)>>1); | |
3855 | } | |
3856 | sp->intreg[Reg_sparcv9_g0] = 0; /* regp might be %g0 */ | |
3857 | break; | |
3858 | default: | |
3859 | fatal("ss_memory_asi_access: internal error - " | |
3860 | "illegal size for MA_LdDouble"); | |
3861 | } | |
3862 | break; | |
3863 | ||
3864 | case MA_StDouble: | |
3865 | { | |
3866 | uint32_t reven; | |
3867 | uint32_t rodd; | |
3868 | ASSERT(size == MA_Size64); | |
3869 | if (MF_Little_Endian & mflags) { | |
3870 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_StDouble with LE - reven=0x%x rodd=0x%x count=0x%x\n", | |
3871 | (uint32_t)regp[0], (uint32_t)regp[1], (1 << size)); ); | |
3872 | reven = (uint32_t)sparcv9_invert_endianess(®p[0], (1 << size)>>1); | |
3873 | rodd = (uint32_t)sparcv9_invert_endianess(®p[1], (1 << size)>>1); | |
3874 | } else { | |
3875 | reven = (uint32_t)regp[0]; | |
3876 | rodd = (uint32_t)regp[1]; | |
3877 | } | |
3878 | val = ((uint64_t)reven << 32) | ((uint32_t)rodd); | |
3879 | *(uint64_t *)ptr = val; | |
3880 | } | |
3881 | break; | |
3882 | ||
3883 | case MA_V9_LdFSR: | |
3884 | ASSERT( MA_Size32 == size ); | |
3885 | val = *(uint32_t*)ptr; | |
3886 | if (MF_Little_Endian & mflags) | |
3887 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3888 | v9_set_fsr_lower(sp, val); | |
3889 | break; | |
3890 | ||
3891 | case MA_V9_LdXFSR: | |
3892 | ASSERT( MA_Size64 == size ); | |
3893 | val = *(uint64_t*)ptr; | |
3894 | if (MF_Little_Endian & mflags) | |
3895 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3896 | v9_set_fsr(sp, val); | |
3897 | break; | |
3898 | ||
3899 | case MA_V9_StFSR: | |
3900 | ASSERT( MA_Size32 == size ); | |
3901 | val = v9_get_fsr(sp); | |
3902 | if (MF_Little_Endian & mflags) | |
3903 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3904 | *(uint32_t*)ptr = val & MASK64(31,0); | |
3905 | /* FTT is cleared on read of FSR */ | |
3906 | sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; | |
3907 | DBGFSR( lprintf(sp->gid, "stfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), val); ); | |
3908 | break; | |
3909 | ||
3910 | case MA_V9_StXFSR: | |
3911 | ASSERT( MA_Size64 == size ); | |
3912 | val = v9_get_fsr(sp); | |
3913 | if (MF_Little_Endian & mflags) | |
3914 | val = sparcv9_invert_endianess(&val, (1 << size)); | |
3915 | *(uint64_t*)ptr = val; | |
3916 | /* FTT is cleared on read of FSR */ | |
3917 | sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; | |
3918 | DBGFSR( lprintf(sp->gid, "stxfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), val); ); | |
3919 | break; | |
3920 | ||
3921 | default: | |
3922 | abort(); | |
3923 | } | |
3924 | ||
3925 | done:; | |
3926 | /* | |
3927 | * Finally go get the next instruction | |
3928 | */ | |
3929 | ||
3930 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: getting the next instr.\n"); ); | |
3931 | ||
3932 | NEXT_INSTN(sp); | |
3933 | } | |
3934 | ||
3935 | ||
3936 | /* | |
3937 | * This function is called through the ASI store to the interrupt vector | |
3938 | * dispatch register ASI_INTR_W (0x73); the store value is passed in via 'val'. | |
3939 | * sp is the originator and tnpp is the target processor. On a | |
3940 | * single node system, sp will belong to tnpp. But on multinode systems, | |
3941 | * if the cross call is going across nodes, the sp will be on a different chip | |
3942 | * than the tnpp. | |
3943 | */ | |
3944 | void niagara2_send_xirq(simcpu_t * sp, ss_proc_t * tnpp, uint64_t val) | |
3945 | { | |
3946 | uint_t strand, vcore_id, intr_id; | |
3947 | uint_t vec_bit; | |
3948 | uint_t type; | |
3949 | ss_strand_t * tstrandp; | |
3950 | bool_t pay_attention; | |
3951 | #ifdef VFALLS | |
3952 | sparcv9_cpu_t * v9p; | |
3953 | ss_strand_t * nsp; | |
3954 | int source_vcore_id; | |
3955 | #endif | |
3956 | vcore_id = (val & MASK64(13, 8)) >> 8; | |
3957 | vec_bit = val & MASK64(5,0); | |
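| /* | |
| * Per the masks above, the store data encodes the target virtual core in | |
| * bits 13:8 and the vector number in bits 5:0; e.g. val == 0x512 targets | |
| * virtual core 5 with vector bit 0x12. | |
| */ | |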
3958 | ||
3959 | /* normalize strand to internal strand */ | |
3960 | strand = STRANDID2IDX(tnpp, vcore_id); | |
3961 | if (!VALIDIDX(tnpp, strand)) { | |
3962 | fatal("[0x%llx] (pc=0x%llx)\tWrite to ASI_INTR_W with " | |
3963 | "illegal virtual core value 0x%llx. ", | |
3964 | sp->gid, sp->pc, vcore_id); | |
3965 | return; | |
3966 | } | |
3967 | ||
3968 | tstrandp = &(tnpp->ss_strandp[strand]); | |
3969 | ||
3970 | /* | |
3971 | * check that the destination interrupt ID matches the ID in the interrupt ID register | |
3972 | * (Niagara 2 uses ASI_CMP_CORE_INTR_ID as the interrupt ID register. The ID is defined | |
3973 | * by bits 5:0, and it should match the virtual core ID, i.e. ASI_CMP_CORE_ID bits 5:0). | |
3974 | */ | |
3975 | intr_id = tstrandp->vcore_id; | |
3976 | if (intr_id != vcore_id) { | |
3977 | fatal("[0x%llx] (pc=0x%llx)\tDetected a miss-matched interrupt Id: " | |
3978 | "dst_intr_id = 0x%llx cmp_core_intr_id = 0x%llx", | |
3979 | sp->gid, sp->pc, vcore_id, intr_id); | |
3980 | return; | |
3981 | } | |
3982 | ||
3983 | pthread_mutex_lock(&tstrandp->irq_lock); | |
3984 | pay_attention = (0LL == tstrandp->irq_vector); | |
3985 | tstrandp->irq_vector |= (1LL<<vec_bit); | |
3986 | pthread_mutex_unlock(&tstrandp->irq_lock); | |
3987 | ||
3988 | DBGE(lprintf(sp->gid, "niagara2_send_xirq: target strand=%p irq_vector=0x%llx\n", | |
3989 | tstrandp, tstrandp->irq_vector); ); | |
3990 | #ifdef VFALLS | |
3991 | v9p = sp->specificp; | |
3992 | nsp = v9p->impl_specificp; | |
3993 | source_vcore_id = nsp->vcore_id; | |
3994 | DBGMULNODE(lprintf(sp->gid, "niagara2_send_xirq from vcore_id %d on node %d" | |
3995 | " to vcore_id %d on node %d irq_vector=%llx\n", | |
3996 | source_vcore_id, sp->config_procp->proc_id, vcore_id, tnpp->config_procp->proc_id, | |
3997 | tstrandp->irq_vector); ); | |
3998 | #endif | |
3999 | /* | |
4000 | * The complicated part here is that the execution thread | |
4001 | * determines when, if at all, the interrupt is actually | |
4002 | * delivered; all we need to do here is ensure that that thread | |
4003 | * pays attention to the fact that the interrupt vector status has | |
4004 | * changed .. we only care if it goes non-zero ... | |
4005 | */ | |
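| /* | |
| * Condensed (names shortened for illustration; target_sp stands for the | |
| * target strand's simcpu), this is the notification pattern used both here | |
| * and in niagara2_ext_signal() below: | |
| * | |
| *	pthread_mutex_lock(&irq_lock); | |
| *	pay_attention = (irq_vector == 0); | |
| *	irq_vector |= (1LL << vec_bit); | |
| *	pthread_mutex_unlock(&irq_lock); | |
| *	if (pay_attention) | |
| *		target_sp->async_event = true; | |
| */ | |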
4006 | ||
4007 | if (pay_attention) { | |
4008 | sparcv9_cpu_t * tv9p; | |
4009 | simcpu_t * tsp; | |
4010 | ||
4011 | tv9p = tnpp->strand[strand]; | |
4012 | ||
4013 | tsp = tv9p->simp; | |
4014 | tsp->async_event = true; | |
4015 | } | |
4016 | } | |
4017 | ||
4018 | ||
4019 | static uint64_t niagara2_ext_signal(config_proc_t * config_procp, ext_sig_t sigtype, void *vp) | |
4020 | { | |
4021 | ss_proc_t *npp; | |
4022 | simcpu_t *sp; | |
4023 | sparcv9_cpu_t *v9p; | |
4024 | ss_strand_t *nsp; | |
4025 | ncu_t *ncup; | |
4026 | pcie_mondo_t mondo; | |
4027 | bool_t pay_attention; | |
4028 | uint64_t int_man; | |
4029 | uint_t thread_id, device_id, strand, vec_bit; | |
4030 | int i; | |
4031 | ||
4032 | ||
4033 | npp = (ss_proc_t*)(config_procp->procp); | |
4034 | ncup = npp->ncup; | |
4035 | ||
4036 | switch (sigtype) { | |
4037 | case ES_NIU: | |
4038 | device_id = *(uint_t *)vp; | |
4039 | if ((device_id < NCU_DEV_NIU_LB) || (device_id > NCU_DEV_NIU_UB)) | |
4040 | fatal("niagara2_ext_signal: NIU device_id 0x%lx out of range", | |
4041 | device_id); | |
4042 | int_man = ncup->regs.int_man[device_id]; | |
4043 | thread_id = NCU_INT_MAN_CPUID(int_man); | |
4044 | vec_bit = int_man & INTR_VEC_MASK; | |
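| /* | |
| * The INT_MAN entry for the device carries the steering information: | |
| * NCU_INT_MAN_CPUID() extracts the target thread, and the low bits give | |
| * the vector bit to set in that thread's irq_vector (the SSI case below | |
| * uses the same encoding). | |
| */ | |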
4045 | break; | |
4046 | case ES_SSI: | |
4047 | device_id = NCU_DEV_SSI; | |
4048 | int_man = ncup->regs.int_man[device_id]; | |
4049 | thread_id = NCU_INT_MAN_CPUID(int_man); | |
4050 | vec_bit = int_man & INTR_VEC_MASK; | |
4051 | break; | |
4052 | case ES_PCIE: | |
4053 | mondo = *(pcie_mondo_t *)vp; | |
4054 | thread_id = mondo.thread_id; | |
4055 | ||
4056 | pthread_mutex_lock(&ncup->ncu_lock); | |
4057 | ||
4058 | if (ncup->regs.mondo_int_busy[thread_id] & NCU_MONDO_INT_BUSY) { | |
4059 | pthread_mutex_unlock(&ncup->ncu_lock); | |
4060 | return NCU_INT_NACK; | |
4061 | } else { | |
4062 | ncup->regs.mondo_int_data0[thread_id] = mondo.data[0]; | |
4063 | ncup->regs.mondo_int_data1[thread_id] = mondo.data[1]; | |
4064 | ncup->regs.mondo_int_busy[thread_id] = NCU_MONDO_INT_BUSY; | |
4065 | } | |
4066 | ||
4067 | vec_bit = ncup->regs.mondo_int_vec & INTR_VEC_MASK; | |
4068 | ||
4069 | pthread_mutex_unlock(&ncup->ncu_lock); | |
4070 | ||
4071 | break; | |
4072 | ||
4073 | case ES_LEGION_SAVE_STATE: | |
4074 | for (i=(npp->nstrands)-1; i>=0; i--) { | |
4075 | v9p = npp->strand[i]; | |
4076 | nsp = (ss_strand_t *)(v9p->impl_specificp); | |
4077 | nsp->pending_async_tt = SS_trap_legion_save_state; | |
4078 | sp = v9p->simp; | |
4079 | DBGE( lprintf(sp->gid, "ES_SAVE_STATE set_attention\n"); ); | |
4080 | sp->exception_pending = true; | |
4081 | ||
4082 | } | |
4083 | return (0); | |
4084 | ||
4085 | default: | |
4086 | EXEC_WARNING(("processor%d: ext_signal %d ignored", | |
4087 | config_procp->proc_id, sigtype)); | |
4088 | return NCU_INT_NACK; | |
4089 | } | |
4090 | ||
4091 | /* | |
4092 | * send IRQ interrupt to the target strand | |
4093 | */ | |
4094 | strand = STRANDID2IDX(npp, thread_id); | |
4095 | if (!VALIDIDX(npp, strand)) { | |
4096 | EXEC_WARNING(("niagara2_ext_signal called with illegal strand 0x%x", strand)); | |
4097 | return NCU_INT_NACK; | |
4098 | } | |
4099 | ||
4100 | nsp = &(npp->ss_strandp[strand]); | |
4101 | v9p = npp->strand[strand]; | |
4102 | sp = v9p->simp; | |
4103 | ||
4104 | pthread_mutex_lock(&nsp->irq_lock); | |
4105 | pay_attention = (0LL == nsp->irq_vector); | |
4106 | nsp->irq_vector |= 1LL << vec_bit; | |
4107 | pthread_mutex_unlock(&nsp->irq_lock); | |
4108 | ||
4109 | DBGE(lprintf(sp->gid, "niagara2_ext_signal: target strand=%p irq_vector=%llx\n", | |
4110 | nsp, nsp->irq_vector); ); | |
4111 | ||
4112 | if (pay_attention) { | |
4113 | sp->async_event = true; | |
4114 | } | |
4115 | ||
4116 | return NCU_INT_ACK; | |
4117 | } | |
4118 | ||
4119 | ||
4120 | /* | |
4121 | * CPU specific instruction decode routine. This routine is called from the main | |
4122 | * instruction decoder routine only when that routine comes up empty handed (i.e. | |
4123 | * before declaring it an illegal or unknown instruction). For now, we have not | |
4124 | * implemented any frequently used CPU specific instructions for | |
4125 | * Niagara 2, and so the performance impact of making this function call is | |
4126 | * negligible since it doesn't happen in the common case. | |
4127 | * | |
4128 | * This routine returns a pointer to the exec function which is to be run as a | |
4129 | * result of encountering the instruction op code in question. Other than that, | |
4130 | * it is designed to be very similar to the main instruction decode routine | |
4131 | * sparcv9_decode_me(). | |
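| * | |
| * Conceptually the fallback looks like the sketch below (illustrative | |
| * only; the real dispatch lives in the generic decoder, and the names of | |
| * its internals are assumptions here): | |
| * | |
| *	exec_funcp = generic sparcv9 decode of instn; | |
| *	if (exec_funcp == NULL) | |
| *		exec_funcp = niagara2_decode_me(sp, xcip, instn); | |
| *	if (exec_funcp == NULL) | |
| *		treat instn as illegal / unknown; | |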
4132 | */ | |
4133 | static op_funcp niagara2_decode_me(simcpu_t *sp, xicache_instn_t * xcip, uint32_t instn) | |
4134 | { | |
4135 | uint_t rs1, rd, rs2; | |
4136 | sint32_t simm; | |
4137 | T2o3_code_t op2c; | |
4138 | op_funcp exec_funcp; | |
4139 | ||
4140 | switch ((ty_code_t)X_OP(instn)) { | |
4141 | case Ty_0: /* Branches and Sethi */ | |
4142 | break; | |
4143 | case Ty_1: /* Call instructions */ | |
4144 | break; | |
4145 | case Ty_2: /* Arithmetic and Misc instructions */ | |
4146 | rs1 = X_RS1(instn); | |
4147 | rd = X_RD(instn); | |
4148 | op2c = (T2o3_code_t)X_OP3(instn); | |
4149 | ||
4150 | if (X_I(instn)) { | |
4151 | simm = X_SIMM13(instn); | |
4152 | /* register x immediate -> register forms */ | |
4153 | ||
4154 | switch ( op2c ) { | |
4155 | case T2o3_movcc : | |
4156 | if (!X_FMT4_CC2(instn)) { | |
4157 | FP_DECODE_FPU_ON_CHECK; | |
4158 | ||
4159 | if (rd == 0) goto n2_do_noop; | |
4160 | ||
4161 | /* We attempt to fast path movfcc_a ... */ | |
4162 | if (X_FMT4_COND(instn) == cond_n) goto n2_do_noop; | |
4163 | simm = X_SIMM11(instn); | |
4164 | if (X_FMT4_COND(instn) == cond_a) { | |
4165 | goto n2_do_move_simm; | |
4166 | } | |
4167 | SET_OP_MOVCC_CC(X_FMT4_CC(instn)); | |
4168 | SET_OP_SIMM16(simm); | |
4169 | SET_OP_RD(rd); | |
4170 | SET_OP_MOVCC_COND(X_FMT4_COND(instn)); | |
4171 | SET_OPv9(movfcc_imm); | |
4172 | goto n2_all_done; | |
4173 | } | |
4174 | ||
4175 | switch( (cc4bit_t)X_FMT4_CC(instn) ) { | |
4176 | case CC4bit_icc: SET_OP_MOVCC_CC(0); break; | |
4177 | case CC4bit_xcc: SET_OP_MOVCC_CC(1); break; | |
4178 | default: | |
4179 | SET_OP_ILL_REASON(movcc_illegal_cc_field); | |
4180 | goto n2_illegal_instruction; | |
4181 | } | |
4182 | ||
4183 | if (rd == 0) goto n2_do_noop; | |
4184 | ||
4185 | /* truncate simm - movcc instructions only have an 11 bit | |
4186 | * immediate, not the 13 bit field we | |
4187 | * extracted above | |
4188 | */ | |
4189 | simm = X_SIMM11(instn); | |
4190 | ||
4191 | if (X_FMT4_COND(instn) == cond_n) goto n2_do_noop; | |
4192 | if (X_FMT4_COND(instn) == cond_a) goto n2_do_move_simm; | |
4193 | SET_OP_SIMM16(simm); | |
4194 | SET_OP_RD(rd); | |
4195 | SET_OP_MOVCC_COND(X_FMT4_COND(instn)); | |
4196 | SET_OPv9(movcc_imm); | |
4197 | goto n2_all_done; | |
4198 | case T2o3_mulscc : | |
4199 | SET_OPv9(mulscc_imm); | |
4200 | goto n2_do_imm; | |
4201 | case T2o3_popc : | |
4202 | SET_OPv9( popc_imm ); | |
4203 | simm = X_SIMM13(instn); | |
4204 | goto n2_do_imm; | |
4205 | case T2o3_rdasr : | |
4206 | /* Here I = 1 */ | |
4207 | if (rd == 0 && rs1==15) { | |
4208 | if (!CHECK_RESERVED_ZERO(instn, 12, 7)) { | |
4209 | SET_OP_ILL_REASON(misc_reserved_field_non_zero); | |
4210 | goto n2_illegal_instruction; | |
4211 | } | |
4212 | simm = X_MEMBAR_MASKS(instn); | |
4213 | SET_OP_SIMM16(simm); /* masks in immediates */ | |
4214 | SET_OPv9( membar ); | |
4215 | goto n2_all_done; | |
4216 | } | |
4217 | /* XXX if I = 1??? */ | |
4218 | SET_OPv9( read_state_reg ); | |
4219 | simm = 0; /* unused */ | |
4220 | goto n2_do_imm; | |
4221 | case T2o3_save : | |
4222 | SET_OPv9(save_imm); /* rd == 0 determined in instn implementation */ | |
4223 | goto n2_do_imm; | |
4224 | case T2o3_restore : | |
4225 | SET_OPv9(restore_imm); | |
4226 | goto n2_do_imm; | |
4227 | case T2o3_return : | |
4228 | SET_OPv9( return_imm ); | |
4229 | goto n2_do_imm; | |
4230 | case T2o3_flush : | |
4231 | SET_OPv9(iflush_imm); | |
4232 | goto n2_do_imm; | |
4233 | ||
4234 | case T2o3_saved: | |
4235 | n2_saved_instn:; | |
4236 | { | |
4237 | int fcn = X_FMT2_FCN(instn); | |
4238 | ||
4239 | if (!CHECK_RESERVED_ZERO(instn, 18, 0)) { | |
4240 | SET_OP_ILL_REASON(saved_reserved_field_non_zero); | |
4241 | goto n2_illegal_instruction; | |
4242 | } | |
4243 | ||
4244 | switch (fcn) { | |
4245 | case 0: /* saved */ | |
4246 | SET_OPv9(saved); | |
4247 | break; | |
4248 | case 1: | |
4249 | SET_OPv9(restored); | |
4250 | break; | |
4251 | case 2: | |
4252 | SET_OPv9(allclean); | |
4253 | break; | |
4254 | case 3: | |
4255 | SET_OPv9(otherw); | |
4256 | break; | |
4257 | case 4: | |
4258 | SET_OPv9(normalw); | |
4259 | break; | |
4260 | case 5: | |
4261 | SET_OPv9(invalw); | |
4262 | break; | |
4263 | default: | |
4264 | SET_OP_ILL_REASON(saved_fcn_invalid); | |
4265 | goto n2_illegal_instruction; | |
4266 | } | |
4267 | goto n2_all_done; | |
4268 | } | |
4269 | ||
4270 | case T2o3_retry : | |
4271 | n2_done_retry_instn:; | |
4272 | switch(X_FMT3_FCN(instn)) { | |
4273 | case 0: | |
4274 | SET_OP_MISC_BITS((uint_t)true); | |
4275 | break; | |
4276 | case 1: | |
4277 | SET_OP_MISC_BITS((uint_t)false); | |
4278 | break; | |
4279 | default: | |
4280 | SET_OP_ILL_REASON(done_retry_illegal_fcn_field); | |
4281 | goto n2_illegal_instruction; | |
4282 | } | |
4283 | SET_OPv9(done_retry); | |
4284 | goto n2_all_done; | |
4285 | default: | |
4286 | break; | |
4287 | } | |
4288 | } else { | |
4289 | rs2 = X_RS2(instn); | |
4290 | /* register x register -> register forms */ | |
4291 | switch ( op2c ) { | |
4292 | case T2o3_mulscc : | |
4293 | SET_OPv9(mulscc_rrr); | |
4294 | goto n2_do_rrr; | |
4295 | case T2o3_popc : | |
4296 | SET_OPv9( popc_rrr ); | |
4297 | goto n2_do_rrr; | |
4298 | case T2o3_gop : | |
4299 | switch ((T3o3_fp36_opf_t)X_FP_OPF(instn)) { | |
4300 | case VISop36_bmask: | |
4301 | SET_OPv9(bmask); | |
4302 | goto n2_do_rrr; | |
4303 | case VISop36_bshuffle: | |
4304 | SET_OPv9(bshuffle); | |
4305 | goto n2_do_fp_s1d_s2d_dd; | |
4306 | case VISop36_fpack32: | |
4307 | SET_OPv9(fpack32); | |
4308 | goto n2_do_fp_s1d_s2d_dd; | |
4309 | case VISop36_fpack16: | |
4310 | SET_OPv9(fpack16); | |
4311 | goto n2_do_fp_s1d_s2d_ds; | |
4312 | case VISop36_fpackfix: | |
4313 | SET_OPv9(fpackfix); | |
4314 | goto n2_do_fp_s1d_s2d_ds; | |
4315 | case VISop36_pdist: | |
4316 | SET_OPv9(pdist); | |
4317 | goto n2_do_fp_s1d_s2d_dd; | |
4318 | case VISop36_fpmerge: | |
4319 | SET_OPv9(fpmerge); | |
4320 | goto n2_do_fp_s1s_s2s_dd; | |
4321 | case VISop36_fexpand: | |
4322 | SET_OPv9(fexpand); | |
4323 | goto n2_do_fp_s1s_s2s_dd; | |
4324 | case VISop36_array16: | |
4325 | SET_OPv9(array16); | |
4326 | goto n2_do_rrr; | |
4327 | case VISop36_array32: | |
4328 | SET_OPv9(array32); | |
4329 | goto n2_do_rrr; | |
4330 | case VISop36_array8: | |
4331 | SET_OPv9(array8); | |
4332 | goto n2_do_rrr; | |
4333 | case VISop36_edge16: | |
4334 | SET_OPv9(edge16); | |
4335 | goto n2_do_rrr; | |
4336 | case VISop36_edge16l: | |
4337 | SET_OPv9(edge16l); | |
4338 | goto n2_do_rrr; | |
4339 | case VISop36_edge16ln: | |
4340 | SET_OPv9(edge16ln); | |
4341 | goto n2_do_rrr; | |
4342 | case VISop36_edge16n: | |
4343 | SET_OPv9(edge16n); | |
4344 | goto n2_do_rrr; | |
4345 | case VISop36_edge32: | |
4346 | SET_OPv9(edge32); | |
4347 | goto n2_do_rrr; | |
4348 | case VISop36_edge32l: | |
4349 | SET_OPv9(edge32l); | |
4350 | goto n2_do_rrr; | |
4351 | case VISop36_edge32ln: | |
4352 | SET_OPv9(edge32ln); | |
4353 | goto n2_do_rrr; | |
4354 | case VISop36_edge32n: | |
4355 | SET_OPv9(edge32n); | |
4356 | goto n2_do_rrr; | |
4357 | case VISop36_edge8: | |
4358 | SET_OPv9(edge8); | |
4359 | goto n2_do_rrr; | |
4360 | case VISop36_edge8l: | |
4361 | SET_OPv9(edge8l); | |
4362 | goto n2_do_rrr; | |
4363 | case VISop36_edge8ln: | |
4364 | SET_OPv9(edge8ln); | |
4365 | goto n2_do_rrr; | |
4366 | case VISop36_edge8n: | |
4367 | SET_OPv9(edge8n); | |
4368 | goto n2_do_rrr; | |
4369 | case VISop36_fcmpeq16: | |
4370 | SET_OPv9(fcmpeq16); | |
4371 | goto n2_do_fp_s1d_s2d_dx; | |
4372 | case VISop36_fcmpeq32: | |
4373 | SET_OPv9(fcmpeq32); | |
4374 | goto n2_do_fp_s1d_s2d_dx; | |
4375 | case VISop36_fcmpgt16: | |
4376 | SET_OPv9(fcmpgt16); | |
4377 | goto n2_do_fp_s1d_s2d_dx; | |
4378 | case VISop36_fcmpgt32: | |
4379 | SET_OPv9(fcmpgt32); | |
4380 | goto n2_do_fp_s1d_s2d_dx; | |
4381 | case VISop36_fcmple16: | |
4382 | SET_OPv9(fcmple16); | |
4383 | goto n2_do_fp_s1d_s2d_dx; | |
4384 | case VISop36_fcmple32: | |
4385 | SET_OPv9(fcmple32); | |
4386 | goto n2_do_fp_s1d_s2d_dx; | |
4387 | case VISop36_fcmpne16: | |
4388 | SET_OPv9(fcmpne16); | |
4389 | goto n2_do_fp_s1d_s2d_dx; | |
4390 | case VISop36_fcmpne32: | |
4391 | SET_OPv9(fcmpne32); | |
4392 | goto n2_do_fp_s1d_s2d_dx; | |
4393 | case VISop36_fmul8sux16: | |
4394 | SET_OPv9(fmul8sux16); | |
4395 | goto n2_do_fp_s1d_s2d_dd; | |
4396 | case VISop36_fmul8ulx16: | |
4397 | SET_OPv9(fmul8ulx16); | |
4398 | goto n2_do_fp_s1d_s2d_dd; | |
4399 | case VISop36_fmul8x16: | |
4400 | SET_OPv9(fmul8x16); | |
4401 | goto n2_do_fp_s1s_s2d_dd; | |
4402 | case VISop36_fmul8x16al: | |
4403 | SET_OPv9(fmul8x16al); | |
4404 | goto n2_do_fp_s1s_s2s_dd; | |
4405 | case VISop36_fmul8x16au: | |
4406 | SET_OPv9(fmul8x16au); | |
4407 | goto n2_do_fp_s1s_s2s_dd; | |
4408 | case VISop36_fmuld8sux16: | |
4409 | SET_OPv9(fmuld8sux16); | |
4410 | goto n2_do_fp_s1s_s2s_dd; | |
4411 | case VISop36_fmuld8ulx16: | |
4412 | SET_OPv9(fmuld8ulx16); | |
4413 | goto n2_do_fp_s1s_s2s_dd; | |
4414 | default: | |
4415 | break; | |
4416 | } | |
4417 | goto n2_unimplemented_visop; | |
4418 | case T2o3_save : | |
4419 | SET_OPv9(save_rrr); /* rd == 0 determined in instn implementation */ | |
4420 | goto n2_do_rrr; | |
4421 | case T2o3_restore : | |
4422 | /* Rd == 0 handled by instruction */ | |
4423 | SET_OPv9(restore_rrr); | |
4424 | goto n2_do_rrr; | |
4425 | case T2o3_return : | |
4426 | SET_OPv9( return_rrr ); | |
4427 | goto n2_do_rrr; | |
4428 | case T2o3_flush : | |
4429 | if (rd != 0) | |
4430 | goto n2_illegal_instruction; | |
4431 | SET_OPv9(iflush_rr); | |
4432 | goto n2_do_rrr; | |
4433 | case T2o3_saved: | |
4434 | goto n2_saved_instn; | |
4435 | case T2o3_retry : | |
4436 | goto n2_done_retry_instn; | |
4437 | default: | |
4438 | break; | |
4439 | } | |
4440 | } | |
4441 | break; | |
4442 | case Ty_3: /* Principally load/store operations */ | |
4443 | break; | |
4444 | default: | |
4445 | break; | |
4446 | } | |
4447 | ||
4448 | n2_unknown_decode:; | |
4449 | return (NULL); | |
4450 | ||
4451 | #ifdef FP_DECODE_DISABLED | |
4452 | n2_fp_disabled:; | |
4453 | SET_OPv9(fp_unimplemented_instruction); | |
4454 | goto n2_all_done; | |
4455 | #endif /* FP_DECODE_DISABLED */ | |
4456 | ||
4457 | n2_unimplemented_visop: | |
4458 | SET_OP_ILL_REASON(unimplemented_visop); | |
4459 | goto n2_illegal_instruction; | |
4460 | ||
4461 | n2_do_imm: | |
4462 | SET_OP_RD(rd); | |
4463 | SET_OP_RS1(rs1); | |
4464 | SET_OP_SIMM16(simm); | |
4465 | goto n2_all_done; | |
4466 | ||
4467 | n2_do_move_simm: | |
4468 | SET_OP( move_simm ); | |
4469 | SET_OP_RD(rd); | |
4470 | SET_OP_SIMM32(simm); | |
4471 | goto n2_all_done; | |
4472 | ||
4473 | n2_do_rrr: | |
4474 | SET_OP_RD(rd); | |
4475 | SET_OP_RS1(rs1); | |
4476 | SET_OP_RS2(rs2); | |
4477 | goto n2_all_done; | |
4478 | ||
4479 | n2_do_noop: | |
4480 | SET_OP( noop ); | |
4481 | goto n2_all_done; | |
4482 | ||
4483 | n2_do_fp_s1d_s2d_ds: | |
4484 | RESCALEFPREG(rs1); | |
4485 | RESCALEFPREG(rs2); | |
4486 | rd = FP_32_INDX(rd); | |
4487 | SET_OP_FPRS1(rs1); | |
4488 | SET_OP_FPRS2(rs2); | |
4489 | SET_OP_FPRD(rd); | |
4490 | goto n2_all_done; | |
4491 | ||
4492 | n2_do_fp_s1d_s2d_dd: | |
4493 | RESCALEFPREG(rs1); | |
4494 | RESCALEFPREG(rs2); | |
4495 | RESCALEFPREG(rd); | |
4496 | SET_OP_FPRS1(rs1); | |
4497 | SET_OP_FPRS2(rs2); | |
4498 | SET_OP_FPRD(rd); | |
4499 | goto n2_all_done; | |
4500 | ||
4501 | n2_do_fp_s1s_s2s_dd: | |
4502 | rs1 = FP_32_INDX(rs1); | |
4503 | SET_OP_FPRS1( rs1 ); | |
4504 | rs2 = FP_32_INDX(rs2); | |
4505 | SET_OP_FPRS2( rs2 ); | |
4506 | RESCALEFPREG(rd); | |
4507 | SET_OP_FPRD( rd ); | |
4508 | goto n2_all_done; | |
4509 | ||
4510 | n2_do_fp_s1s_s2d_dd: | |
4511 | rs1 = FP_32_INDX(rs1); | |
4512 | RESCALEFPREG(rs2); | |
4513 | RESCALEFPREG(rd); | |
4514 | SET_OP_FPRS1(rs1); | |
4515 | SET_OP_FPRS2(rs2); | |
4516 | SET_OP_FPRD(rd); | |
4517 | goto n2_all_done; | |
4518 | ||
4519 | n2_do_fp_s1d_s2d_dx: | |
4520 | RESCALEFPREG(rs1); | |
4521 | RESCALEFPREG(rs2); | |
4522 | SET_OP_FPRS1(rs1); | |
4523 | SET_OP_FPRS2(rs2); | |
4524 | SET_OP_RD(rd); | |
4525 | goto n2_all_done; | |
4526 | ||
4527 | n2_illegal_instruction: | |
4528 | SET_OPv9(illegal_instruction); | |
4529 | ||
4530 | n2_all_done:; | |
4531 | return (exec_funcp); | |
4532 | } | |
4533 | ||
4534 | ||
4535 | void niagara2_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp) | |
4536 | { | |
4537 | ss_proc_t *npp; | |
4538 | ||
4539 | npp = (ss_proc_t *)config_procp->procp; | |
4540 | ||
4541 | if (strcmp(dev_namep, PSEUDO_DEV_NAME_NCU) == 0) { | |
4542 | *((void **) devp) = (void *) npp->ncup; | |
4543 | } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_CCU) == 0) { | |
4544 | *((void **) devp) = (void *) npp->clockp; | |
4545 | } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_MCU) == 0) { | |
4546 | *((void **) devp) = (void *) npp->mbankp; | |
4547 | } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_L2C) == 0) { | |
4548 | *((void **) devp) = (void *) npp->l2p; | |
4549 | } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_SSI) == 0) { | |
4550 | *((void **) devp) = (void *) npp->ssip; | |
4551 | } else { | |
4552 | ASSERT(0); | |
4553 | } | |
4554 | } | |
4555 | ||
4556 | ||
4557 | /* | |
4558 | * Perform any processor specific parsing for "proc" elements in | |
4559 | * the Legion config file. Returns true if the token was handled by this | |
4560 | * function, false otherwise. | |
4561 | */ | |
4562 | bool_t | |
4563 | ss_parse_proc_entry(ss_proc_t *procp, domain_t *domainp) | |
4564 | { | |
4565 | #ifdef VFALLS /* { */ | |
4566 | if (streq(lex.strp, "node_id")) { | |
4567 | uint_t node_id; | |
4568 | ||
4569 | node_id = parse_number_assign(); | |
4570 | procp->config_procp->proc_id = node_id; | |
4571 | ||
4572 | if (node_id > MAX_NODEID) | |
4573 | fatal("Invalid node_id %d in VF config file\n", node_id); | |
4574 | /* Handled a Vfalls specific element */ | |
4575 | return true; | |
4576 | } | |
4577 | ||
4578 | #endif /* } VFALLS */ | |
4579 | ||
4580 | /* Didn't match any Niagara2 specific element */ | |
4581 | return false; | |
4582 | } | |
4583 | ||
4584 | /* Perform any post parsing checks that need to be made to the domain */ | |
4585 | ||
4586 | void niagara2_domain_check(domain_t * domainp) | |
4587 | { | |
4588 | ||
4589 | #ifdef VFALLS /* { */ | |
4590 | /* | |
4591 | * Currently, VF nodes in conf files should be sequential, starting | |
4592 | * from node 0 and not duplicated. See Section 11.9.1.16e of VF PRM Rev 0.1. | |
4593 | * If no node_id field is specified in the conf file, then by default, | |
4594 | * sequential node_id's will be assigned. Otherwise, they can be defined | |
4595 | * in the conf file proc section by using the node_id field. | |
4596 | */ | |
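| /* | |
| * e.g. a two-processor .conf file that specifies node_ids 0 and 2 sets | |
| * node[] to {true, false, true, false}; the AND over indices 0..count-1 | |
| * below then fails on node[1] and the warning is printed. | |
| */ | |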
4597 | uint_t i, node_id; | |
4598 | bool_t node[MAX_NODEID + 1] = {false, false, false, false}; | |
4599 | bool_t node_check = true; | |
4600 | ss_proc_t *npp; | |
4601 | ||
4602 | for (i = 0; i < domainp->procs.count; i++) { | |
4603 | node_id = LIST_ENTRY(domainp->procs, i)->proc_id; | |
4604 | node[node_id] = true; | |
4605 | npp = (ss_proc_t *)LIST_ENTRY(domainp->procs, i)->procp; | |
4606 | if ((domainp->procs.count > 1)) { | |
4607 | npp->global_addressing_ok.flags.rsvd = GLOBAL_ADDRESSING_FLAG_EN; | |
4608 | npp->global_addressing_ok.flags.multi_chip = GLOBAL_ADDRESSING_FLAG_EN; | |
4609 | npp->global_addressing_ok.flags.lfu = GLOBAL_ADDRESSING_FLAG_DIS; | |
4610 | npp->global_addressing_ok.flags.zambezi = GLOBAL_ADDRESSING_FLAG_EN; | |
4611 | } else | |
4612 | npp->global_addressing_ok.all = 0x0; | |
4613 | } | |
4614 | ||
4615 | for (i = 0; i < domainp->procs.count; i++) | |
4616 | node_check &= node[i]; | |
4617 | if (!node_check) { | |
4618 | EXEC_WARNING(("Please make sure that processor node ids" | |
4619 | " in .conf file\n are sequential and unique" | |
4620 | " with node 0 being the lowest node\n")); | |
4621 | } | |
4622 | #endif /* } VFALLS */ | |
4623 | ||
4624 | } |