| 1 | /* |
| 2 | * ========== Copyright Header Begin ========================================== |
| 3 | * |
| 4 | * OpenSPARC T2 Processor File: niagara.c |
| 5 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. |
| 6 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES. |
| 7 | * |
| 8 | * The above named program is free software; you can redistribute it and/or |
| 9 | * modify it under the terms of the GNU General Public |
| 10 | * License version 2 as published by the Free Software Foundation. |
| 11 | * |
| 12 | * The above named program is distributed in the hope that it will be |
| 13 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 15 | * General Public License for more details. |
| 16 | * |
| 17 | * You should have received a copy of the GNU General Public |
| 18 | * License along with this work; if not, write to the Free Software |
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. |
| 20 | * |
| 21 | * ========== Copyright Header End ============================================ |
| 22 | */ |
| 23 | /* |
| 24 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. |
| 25 | * Use is subject to license terms. |
| 26 | */ |
| 27 | #pragma ident "@(#)niagara.c 1.62 07/02/28 SMI" |
| 28 | |
| 29 | #include <stdio.h> |
| 30 | #include <stdlib.h> |
| 31 | #include <unistd.h> |
| 32 | #include <string.h> /* memcpy/memset */ |
| 33 | #include <strings.h> |
| 34 | #include <thread.h> |
| 35 | |
| 36 | #include "ss_common.h" |
| 37 | #include "jbus_mondo.h" |
| 38 | #include "niagara.h" |
| 39 | #include "fpsim.h" |
| 40 | |
| 41 | #if INTERNAL_BUILD |
| 42 | #include "modarith.h" |
| 43 | #endif |
| 44 | |
| 45 | |
| 46 | static void niagara_init_trap_list(); |
| 47 | static bool_t niagara_init_proc_type(proc_type_t * proc_typep); |
| 48 | static op_funcp niagara_decode_me(simcpu_t *sp, xicache_instn_t * xcip, uint32_t instn); |
| 49 | static void niagara_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp); |
| 50 | static void niagara_send_xirq(simcpu_t * sp, uint64_t val); |
| 51 | static void niagara_set_sfsr(simcpu_t *sp, ss_mmu_t *mmup, tvaddr_t addr, |
| 52 | uint_t ft, ss_ctx_t ct, uint_t asi, uint_t w, uint_t e); |
| 53 | static void niagara_domain_check(domain_t *domainp); |
| 54 | |
| 55 | static void niagara_init_trap_list() |
| 56 | { |
| 57 | static ss_trap_list_t setup_list[] = { |
| 58 | |
| 59 | /* Priorities 0 = highest, XX = Lowest */ |
| 60 | /* Number Name Priority User Priv HPriv */ |
| 61 | |
| 62 | /* 0x00 */ { T( legion_save_state ), Pri( 0, 0), H, H, H }, |
| 63 | /* 0x01 */ { T( power_on_reset ), Pri( 0, 0), H, H, H }, |
| 64 | /* 0x02 */ { T( watchdog_reset ), Pri( 1, 0), H, H, H }, |
| 65 | /* 0x03 */ { T( externally_initiated_reset ), Pri( 1, 0), H, H, H }, |
| 66 | /* 0x04 */ { T( software_initiated_reset ), Pri( 1, 0), H, H, H }, |
| 67 | /* 0x05 */ { T( RED_state_exception ), Pri( 1, 0), H, H, H }, |
| 68 | |
| 69 | /* 0x08 */ { T( instruction_access_exception ), Pri( 5, 0), H, H, X }, |
| 70 | /* 0x09 */ { T( instruction_access_MMU_miss ), Pri( 2,16), SW, SW, SW }, |
| 71 | /* 0x0a */ { T( instruction_access_error ), Pri( 3, 0), H, H, H }, |
| 72 | |
| 73 | /* 0x10 */ { T( illegal_instruction ), Pri( 7, 0), H, H, H }, |
| 74 | /* 0x11 */ { T( privileged_opcode ), Pri( 6, 0), P, X, X }, |
| 75 | |
| 76 | /* LDD and STD are in fact implemented by niagara */ |
| 77 | /* 0x12 */ { T( unimplemented_LDD ), Pri( 6, 0), X, X, X }, /* error if received by hypervisor. */ |
| 78 | /* 0x13 */ { T( unimplemented_STD ), Pri( 6, 0), X, X, X }, /* error if received by hypervisor. */ |
| 79 | |
| 80 | |
| 81 | |
| 82 | /* 0x20 */ { T( fp_disabled ), Pri( 8, 0), P, P, UH }, /* error if received by hypervisor. */ |
| 83 | /* 0x21 */ { T( fp_exception_ieee_754 ), Pri(11, 0), P, P, UH }, /* error if received by hypervisor. */ |
| 84 | /* 0x22 */ { T( fp_exception_other ), Pri(11, 0), P, P, UH }, /* error if received by hypervisor. */ |
| 85 | /* 0x23 */ { T( tag_overflow ), Pri(14, 0), P, P, UH }, /* error if received by hypervisor. */ |
| 86 | /* 0x24 */ { T( clean_window ), Pri(10, 0), P, P, UH }, /* error if received by hypervisor - windows not used. */ |
| 87 | |
| 88 | /* 0x28 */ { T( division_by_zero ), Pri(15, 0), P, P, UH }, /* error if received by hypervisor. */ |
| 89 | /* 0x29 */ { T( internal_processor_error ), Pri( 4, 0), H, H, H }, /* generated by register parity errors */ |
| 90 | |
| 91 | /* 0x30 */ { T( data_access_exception ), Pri(12, 0), H, H, UH }, /* error if received by hypervisor - MMU not used. */ |
| 92 | /* 0x31 */ { T( data_access_MMU_miss ), Pri(12, 0), SW, SW, SW }, /* Should not be generated by hardware */ |
| 93 | /* 0x32 */ { T( data_access_error ), Pri(12, 0), H, H, H }, /* handle error and generate report to appropriate supervisor. */ |
| 94 | /* 0x33 */ { T( data_access_protection ), Pri(12, 0), H, H, H }, /* error if received by hypervisor - MMU not used. */ |
| 95 | /* 0x34 */ { T( mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */ |
| 96 | /* 0x35 */ { T( LDDF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */ |
| 97 | /* 0x36 */ { T( STDF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */ |
| 98 | /* 0x37 */ { T( privileged_action ), Pri(11, 0), H, X, X }, /* error if received from hypervisor. */ |
| 99 | /* 0x38 */ { T( LDQF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */ |
| 100 | /* 0x39 */ { T( STQF_mem_address_not_aligned ), Pri(10, 0), H, H, UH }, /* error if received by hypervisor. */ |
| 101 | |
| 102 | /* 0x3e */ { T( instruction_real_translation_miss ), Pri(2, 0), H, H, H }, /* real to pa entry not found in ITLB */ |
| 103 | /* 0x3f */ { T( data_real_translation_miss ), Pri(12, 0), H, H, H }, /* real to pa entry not found in DTLB */ |
| 104 | |
| 105 | /* this one ever generated ? */ |
| 106 | /* 0x40 */ { T( async_data_error ), Pri( 2, 0), H, H, H }, /* remap to sun4v error report */ |
| 107 | |
| 108 | /* 0x41 */ { T( interrupt_level_1 ), Pri(31, 0), P, P, X }, |
| 109 | /* 0x42 */ { T( interrupt_level_2 ), Pri(30, 0), P, P, X }, |
| 110 | /* 0x43 */ { T( interrupt_level_3 ), Pri(29, 0), P, P, X }, |
| 111 | /* 0x44 */ { T( interrupt_level_4 ), Pri(28, 0), P, P, X }, |
| 112 | /* 0x45 */ { T( interrupt_level_5 ), Pri(27, 0), P, P, X }, |
| 113 | /* 0x46 */ { T( interrupt_level_6 ), Pri(26, 0), P, P, X }, |
| 114 | /* 0x47 */ { T( interrupt_level_7 ), Pri(25, 0), P, P, X }, |
| 115 | /* 0x48 */ { T( interrupt_level_8 ), Pri(24, 0), P, P, X }, |
| 116 | /* 0x49 */ { T( interrupt_level_9 ), Pri(23, 0), P, P, X }, |
| 117 | /* 0x4a */ { T( interrupt_level_a ), Pri(22, 0), P, P, X }, |
| 118 | /* 0x4b */ { T( interrupt_level_b ), Pri(21, 0), P, P, X }, |
| 119 | /* 0x4c */ { T( interrupt_level_c ), Pri(20, 0), P, P, X }, |
| 120 | /* 0x4d */ { T( interrupt_level_d ), Pri(19, 0), P, P, X }, |
| 121 | /* 0x4e */ { T( interrupt_level_e ), Pri(18, 0), P, P, X }, |
| 122 | /* 0x4f */ { T( interrupt_level_f ), Pri(17, 0), P, P, X }, |
| 123 | |
| 124 | /* 0x5e */ { T( hstick_match ), Pri( 2, 0), H, H, H }, |
| 125 | /* 0x5f */ { T( trap_level_zero ), Pri( 2, 8), H, H, X }, /* This trap requires TL==0, priv==1 and hpriv==0 */ |
| 126 | |
| 127 | /* 0x60 */ { T( interrupt_vector_trap ), Pri(16, 0), H, H, H }, /* handle & remap to sun4v as appropriate mondo queue */ |
| 128 | /* 0x61 */ { T( RA_watchpoint ), Pri(12, 0), SW, SW, SW }, /* not used by hypervisor, so error if received from hypervisor. */ |
| 129 | /* 0x62 */ { T( VA_watchpoint ), Pri(11, 0), P, P, X }, /* error - VA watchpoints should be pended if hpriv=1 */ |
| 130 | /* 0x63 */ { T( ECC_error ), Pri(33, 0), H, H, H }, /* handle & create sun4v error report(s) */ |
| 131 | /* 0x64 */ { T( fast_instruction_access_MMU_miss ), Pri( 2,24), H, H, H }, /* handle & proper TSB check. */ |
| 132 | /* 0x68 */ { T( fast_data_access_MMU_miss ), Pri(12, 0), H, H, H }, /* handle & proper TSB check. */ |
| 133 | /* 0x6c */ { T( fast_data_access_protection ), Pri(12, 0), H, H, H }, /* handle & proper TSB check. */ |
| 134 | /* 0x74 */ { TN1( modular_arithmetic ), Pri(16, 1), H, H, H }, |
| 135 | /* 0x76 */ { T( instruction_breakpoint ), Pri(7, 1), H, H, H }, |
| 136 | /* 0x78 */ { TN1( data_error ), Pri(13, 0), H, H, H }, |
| 137 | /* 0x7c */ { T( cpu_mondo_trap ), Pri(16, 2), P, P, X }, |
| 138 | /* 0x7d */ { T( dev_mondo_trap ), Pri(16, 3), P, P, X }, |
| 139 | /* 0x7e */ { T( resumable_error ), Pri(33, 0), P, P, X }, |
| 140 | /* faked by the hypervisor */ |
| 141 | /* 0x7f */ { T( nonresumable_error ), Pri( 4, 0), SW, SW, SW }, |
| 142 | |
| 143 | /* 0x80 */ { T( spill_0_normal ), Pri( 9, 0), P, P, UH }, |
| 144 | /* 0x84 */ { T( spill_1_normal ), Pri( 9, 0), P, P, UH }, |
| 145 | /* 0x88 */ { T( spill_2_normal ), Pri( 9, 0), P, P, UH }, |
| 146 | /* 0x8c */ { T( spill_3_normal ), Pri( 9, 0), P, P, UH }, |
| 147 | /* 0x90 */ { T( spill_4_normal ), Pri( 9, 0), P, P, UH }, |
| 148 | /* 0x94 */ { T( spill_5_normal ), Pri( 9, 0), P, P, UH }, |
| 149 | /* 0x98 */ { T( spill_6_normal ), Pri( 9, 0), P, P, UH }, |
| 150 | /* 0x9c */ { T( spill_7_normal ), Pri( 9, 0), P, P, UH }, |
| 151 | |
| 152 | /* 0xa0 */ { T( spill_0_other ), Pri( 9, 0), P, P, UH }, |
| 153 | /* 0xa4 */ { T( spill_1_other ), Pri( 9, 0), P, P, UH }, |
| 154 | /* 0xa8 */ { T( spill_2_other ), Pri( 9, 0), P, P, UH }, |
| 155 | /* 0xac */ { T( spill_3_other ), Pri( 9, 0), P, P, UH }, |
| 156 | /* 0xb0 */ { T( spill_4_other ), Pri( 9, 0), P, P, UH }, |
| 157 | /* 0xb4 */ { T( spill_5_other ), Pri( 9, 0), P, P, UH }, |
| 158 | /* 0xb8 */ { T( spill_6_other ), Pri( 9, 0), P, P, UH }, |
| 159 | /* 0xbc */ { T( spill_7_other ), Pri( 9, 0), P, P, UH }, |
| 160 | |
| 161 | /* 0xc0 */ { T( fill_0_normal ), Pri( 9, 0), P, P, UH }, |
| 162 | /* 0xc4 */ { T( fill_1_normal ), Pri( 9, 0), P, P, UH }, |
| 163 | /* 0xc8 */ { T( fill_2_normal ), Pri( 9, 0), P, P, UH }, |
| 164 | /* 0xcc */ { T( fill_3_normal ), Pri( 9, 0), P, P, UH }, |
| 165 | /* 0xd0 */ { T( fill_4_normal ), Pri( 9, 0), P, P, UH }, |
| 166 | /* 0xd4 */ { T( fill_5_normal ), Pri( 9, 0), P, P, UH }, |
| 167 | /* 0xd8 */ { T( fill_6_normal ), Pri( 9, 0), P, P, UH }, |
| 168 | /* 0xdc */ { T( fill_7_normal ), Pri( 9, 0), P, P, UH }, |
| 169 | |
| 170 | /* 0xe0 */ { T( fill_0_other ), Pri( 9, 0), P, P, UH }, |
| 171 | /* 0xe4 */ { T( fill_1_other ), Pri( 9, 0), P, P, UH }, |
| 172 | /* 0xe8 */ { T( fill_2_other ), Pri( 9, 0), P, P, UH }, |
| 173 | /* 0xec */ { T( fill_3_other ), Pri( 9, 0), P, P, UH }, |
| 174 | /* 0xf0 */ { T( fill_4_other ), Pri( 9, 0), P, P, UH }, |
| 175 | /* 0xf4 */ { T( fill_5_other ), Pri( 9, 0), P, P, UH }, |
| 176 | /* 0xf8 */ { T( fill_6_other ), Pri( 9, 0), P, P, UH }, |
| 177 | /* 0xfc */ { T( fill_7_other ), Pri( 9, 0), P, P, UH }, |
| 178 | |
| 179 | /*0x100-0x17f*/{T( trap_instruction ), Pri(16,32), P, P, H }, /* hv1: handles hypervisor traps only. Error if received from hypervisor. */ |
| 180 | /*0x180-0x1ff*/{T( htrap_instruction ), Pri(16,32), X, H, UH }, /* used to implement the supervisor to hypervisor API call. */ |
| 181 | |
| 182 | #undef T |
| 183 | #undef TN1 |
| 184 | #undef TN2 |
| 185 | #undef TRK |
| 186 | #undef X |
| 187 | #undef SW |
| 188 | #undef P |
| 189 | #undef H |
| 190 | #undef UH |
| 191 | #undef Pri |
| 192 | |
| 193 | { -1, (char*)0 }, |
| 194 | |
| 195 | }; |
| 196 | uint_t i; |
| 197 | |
| 198 | for (i=0; setup_list[i].trap_type != -1; i++) { |
| 199 | ASSERT( setup_list[i].trap_type>=SS_trap_legion_save_state && setup_list[i].trap_type<SS_trap_illegal_value ); |
| 200 | |
| 201 | ss_trap_list[ setup_list[i].trap_type ] = setup_list[i]; |
| 202 | } |
| 203 | |
| 204 | /* Now clone the trap instruction entries */ |
| 205 | |
| 206 | for (i=0x101; i<0x180; i++) { |
| 207 | ss_trap_list[ i ] = ss_trap_list[ 0x100 ]; |
| 208 | ss_trap_list[ i ].trap_type = i; |
| 209 | } |
| 210 | |
| 211 | for (i=0x181; i<0x200; i++) { |
| 212 | ss_trap_list[ i ] = ss_trap_list[ 0x180 ]; |
| 213 | ss_trap_list[ i ].trap_type = i; |
| 214 | } |
| 215 | } |
| 216 | |
| 217 | extern struct fpsim_functions fpsim_funclist; |
| 218 | |
| 219 | proc_type_t proc_type_niagara={ |
| 220 | "niagara", |
| 221 | false, /* module initialised */ |
| 222 | niagara_init_proc_type, |
| 223 | |
| 224 | /* config support */ |
| 225 | ss_parse, |
| 226 | ss_init, |
| 227 | ss_dump, |
| 228 | |
| 229 | /* execution support functions */ |
| 230 | ss_dbgr_regread, |
| 231 | ss_dbgr_regwrite, |
| 232 | ss_exec_setup, |
| 233 | ss_exec_cleanup, |
| 234 | ss_save_state, |
| 235 | |
| 236 | ss_check_async_event, |
| 237 | ss_take_exception, |
| 238 | |
| 239 | |
| 240 | #if ERROR_INJECTION |
| 241 | ss_error_condition, |
| 242 | #endif |
| 243 | #if ERROR_TRAP_GEN /* { */ |
| 244 | trigger_error_trap, |
| 245 | ss_error_reload_file, |
| 246 | ss_error_dump_active, |
| 247 | ss_error_dump_supported, |
| 248 | #endif /* } */ |
| 249 | n1_sp_interrupt, |
| 250 | |
| 251 | niagara_decode_me, |
| 252 | |
| 253 | /* pointer to fpsim instructions */ |
| 254 | &fpsim_funclist, |
| 255 | |
| 256 | /* performance measuring funcs */ |
| 257 | sparcv9_perf_dump, |
| 258 | |
| 259 | /* dump tlb, instruction counts etc */ |
| 260 | ss_dump_tlbs, |
| 261 | ss_dump_instruction_counts, |
| 262 | |
| 263 | /* external interface methods */ |
| 264 | ss_ext_signal, |
| 265 | ss_get_cpuid, |
| 266 | niagara_get_pseudo_dev, |
| 267 | ss_dev_mem_access, |
| 268 | |
| 269 | /* debugger interface methods */ |
| 270 | ss_dbgr_attach, |
| 271 | ss_dbgr_detach, |
| 272 | |
| 273 | ss_dbgr_mem_read, |
| 274 | ss_dbgr_mem_write, |
| 275 | ss_dbgr_mem_clear, |
| 276 | |
| 277 | ss_dbgr_set_break, |
| 278 | ss_dbgr_clear_break, |
| 279 | niagara_domain_check, |
| 280 | |
| 281 | sparcv9_reg_map, |
| 282 | |
| 283 | NULL, /* debug_hookp */ |
| 284 | NULL, /* debug_hook_dumpp */ |
| 285 | |
| 286 | CPU_MAGIC |
| 287 | }; |
| 288 | |
| 289 | /* |
| 290 | * Niagara uses registers located at magic addresses in its physical address |
| 291 | * space to control functional units placed outside the direct processor core. |
| 292 | * We emulate these with pseudo devices that are created implicitly when a Niagara |
| 293 | * is declared. |
| 294 | * To support this we have a number of device and function definitions below. |
| 295 | */ |
| 296 | static void ss_clock_init(config_dev_t *); |
| 297 | static void ss_dram_ctl_init(config_dev_t *); |
| 298 | static void ss_iob_init(config_dev_t *); |
| 299 | static void ss_jbi_init(config_dev_t *); |
| 300 | static void ss_jbus_init(config_dev_t *); |
| 301 | static void ss_l2_ctl_init(config_dev_t *); |
| 302 | static void ss_ssi_init(config_dev_t *); |
| 303 | |
| 304 | static bool_t ss_clock_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp); |
| 305 | static bool_t ss_dram_ctl_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp); |
| 306 | static bool_t ss_iob_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp); |
| 307 | static bool_t ss_jbi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp); |
| 308 | static bool_t ss_jbus_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp); |
| 309 | static bool_t ss_l2_ctl_access(simcpu_t *, config_addr_t *, tpaddr_t offset, maccess_t op, uint64_t * regp); |
| 310 | static bool_t ss_ssi_access(simcpu_t *, config_addr_t *, tpaddr_t off, maccess_t op, uint64_t * regp); |
| 311 | |
| 312 | static dev_type_t dev_type_ss_clock = { |
| 313 | "ss_clock", |
| 314 | NULL, /* parse */ |
| 315 | ss_clock_init, |
| 316 | NULL, /* dump */ |
| 317 | generic_device_non_cacheable, |
| 318 | ss_clock_access, |
| 319 | DEV_MAGIC |
| 320 | }; |
| 321 | static dev_type_t dev_type_ss_dram_ctl = { |
| 322 | "ss_memory_ctl", |
| 323 | NULL, /* parse */ |
| 324 | ss_dram_ctl_init, |
| 325 | NULL, /* dump */ |
| 326 | generic_device_non_cacheable, |
| 327 | ss_dram_ctl_access, |
| 328 | DEV_MAGIC |
| 329 | }; |
| 330 | static dev_type_t dev_type_ss_l2_ctl = { |
| 331 | "ss_l2_ctl", |
| 332 | NULL, /* parse */ |
| 333 | ss_l2_ctl_init, |
| 334 | NULL, /* dump */ |
| 335 | generic_device_non_cacheable, |
| 336 | ss_l2_ctl_access, |
| 337 | DEV_MAGIC |
| 338 | }; |
| 339 | static dev_type_t dev_type_ss_iob = { |
| 340 | "ss_iob", |
| 341 | NULL, /* parse */ |
| 342 | ss_iob_init, |
| 343 | NULL, /* dump */ |
| 344 | generic_device_non_cacheable, |
| 345 | ss_iob_access, |
| 346 | DEV_MAGIC |
| 347 | }; |
| 348 | static dev_type_t dev_type_ss_jbi = { |
| 349 | "ss_jbi", |
| 350 | NULL, /* parse */ |
| 351 | ss_jbi_init, |
| 352 | NULL, /* dump */ |
| 353 | generic_device_non_cacheable, |
| 354 | ss_jbi_access, |
| 355 | DEV_MAGIC |
| 356 | }; |
| 357 | static dev_type_t dev_type_ss_jbus = { |
| 358 | "ss_jbus", |
| 359 | NULL, /* parse */ |
| 360 | ss_jbus_init, |
| 361 | NULL, /* dump */ |
| 362 | generic_device_non_cacheable, |
| 363 | ss_jbus_access, |
| 364 | DEV_MAGIC |
| 365 | }; |
| 366 | static dev_type_t dev_type_ss_ssi = { |
| 367 | "ss_ssi", |
| 368 | NULL, /* parse */ |
| 369 | ss_ssi_init, |
| 370 | NULL, /* dump */ |
| 371 | generic_device_non_cacheable, |
| 372 | ss_ssi_access, |
| 373 | DEV_MAGIC |
| 374 | }; |
| 375 | |
| 376 | |
| 377 | /* |
| 378 | * Perform any processor specific parsing for "proc" elements in |
| 379 | * Legion config file. Returns true if token was handled by this |
| 380 | * function, false otherwise. |
| 381 | */ |
| 382 | bool_t |
| 383 | ss_parse_proc_entry(ss_proc_t *procp, domain_t *domainp) |
| 384 | { |
| 385 | if (streq(lex.strp,"rust_jbi_stores")) { |
| 386 | procp->rust_jbi_stores = true; |
| 387 | lex_get(T_S_Colon); |
| 388 | } else { |
| 389 | /* Didn't match any Niagara specific element */ |
| 390 | return false; |
| 391 | } |
| 392 | |
| 393 | /* Handled some Niagara specific element */ |
| 394 | return true; |
| 395 | } |
| 396 | |
| 397 | /* |
| 398 | * Set up the pseudo physical devices specific for N1. |
| 399 | */ |
| 400 | void ss_setup_pseudo_devs(domain_t * domainp, ss_proc_t *procp) |
| 401 | { |
| 402 | config_dev_t *pd, *overlapp; |
| 403 | |
| 404 | /* |
| 405 | * Clock Unit |
| 406 | */ |
| 407 | procp->clockp = Xcalloc(1, ss_clock_t); |
| 408 | |
| 409 | pd = Xcalloc(1, config_dev_t); |
| 410 | pd->is_implied = true; |
| 411 | pd->dev_typep = &dev_type_ss_clock; |
| 412 | pd->devp = (void*)procp; |
| 413 | procp->clock_devp = pd; |
| 414 | |
| 415 | insert_domain_address(domainp, pd, 0x9600000000LL, |
| 416 | 0x9600000000LL+0x100000000LL); |
| 417 | |
| 418 | overlapp = insert_domain_device(domainp, pd); |
| 419 | if (overlapp != NULL) { |
| 420 | lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx", |
| 421 | overlapp->dev_typep->dev_type_namep, |
| 422 | overlapp->addrp->baseaddr, |
| 423 | pd->dev_typep->dev_type_namep, |
| 424 | pd->addrp->baseaddr); |
| 425 | } |
| 426 | |
| 427 | /* |
| 428 | * Memory banks |
| 429 | * FIXME: for the moment is this fixed at 4 - need to adjust with a variable |
| 430 | * FIXME: Should the allocation of mbankp be in ss_init instead ? |
| 431 | */ |
| 432 | |
| 433 | procp->num_mbanks = 4; |
| 434 | procp->mbankp = Xcalloc(procp->num_mbanks, ss_dram_bank_t); |
| 435 | |
| 436 | pd = Xcalloc(1, config_dev_t); |
| 437 | pd->is_implied = true; |
| 438 | pd->dev_typep = &dev_type_ss_dram_ctl; |
| 439 | pd->devp = (void*)procp; |
| 440 | procp->dram_ctl_devp = pd; |
| 441 | |
| 442 | insert_domain_address(domainp, pd, 0x9700000000LL, |
| 443 | 0x9700000000LL+4096LL*(uint64_t)procp->num_mbanks); |
| 444 | |
| 445 | overlapp = insert_domain_device(domainp, pd); |
| 446 | if (overlapp != NULL) { |
| 447 | lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx", |
| 448 | overlapp->dev_typep->dev_type_namep, |
| 449 | overlapp->addrp->baseaddr, |
| 450 | pd->dev_typep->dev_type_namep, |
| 451 | pd->addrp->baseaddr); |
| 452 | } |
| 453 | |
| 454 | /* |
| 455 | * IOB |
| 456 | */ |
| 457 | procp->iobp = Xcalloc(1, ss_iob_t); |
| 458 | |
| 459 | pd = Xcalloc(1, config_dev_t); |
| 460 | pd->is_implied = true; |
| 461 | pd->dev_typep = &dev_type_ss_iob; |
| 462 | pd->devp = (void*)procp; |
| 463 | procp->iob_devp = pd; |
| 464 | |
| 465 | insert_domain_address(domainp, pd, 0x9800000000LL, |
| 466 | 0x9800000000LL+0x100000000LL); |
| 467 | |
| 468 | overlapp = insert_domain_device(domainp, pd); |
| 469 | if (overlapp != NULL) { |
| 470 | lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx", |
| 471 | overlapp->dev_typep->dev_type_namep, |
| 472 | overlapp->addrp->baseaddr, |
| 473 | pd->dev_typep->dev_type_namep, |
| 474 | pd->addrp->baseaddr); |
| 475 | } |
| 476 | |
| 477 | /* |
| 478 | * JBI |
| 479 | */ |
| 480 | procp->jbip = Xcalloc(1, ss_jbi_t); |
| 481 | |
| 482 | pd = Xcalloc(1, config_dev_t); |
| 483 | pd->is_implied = true; |
| 484 | pd->dev_typep = &dev_type_ss_jbi; |
| 485 | pd->devp = (void*)procp; |
| 486 | procp->jbi_devp = pd; |
| 487 | |
| 488 | insert_domain_address(domainp, pd, 0x8000000000LL, |
| 489 | 0x8000000000LL+0x100000000LL); |
| 490 | |
| 491 | overlapp = insert_domain_device(domainp, pd); |
| 492 | if (overlapp != NULL) { |
| 493 | lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx", |
| 494 | overlapp->dev_typep->dev_type_namep, |
| 495 | overlapp->addrp->baseaddr, |
| 496 | pd->dev_typep->dev_type_namep, |
| 497 | pd->addrp->baseaddr); |
| 498 | } |
| 499 | |
| 500 | /* |
| 501 | * JBUS |
| 502 | */ |
| 503 | procp->jbusp = Xcalloc(1, ss_jbus_t); |
| 504 | |
| 505 | pd = Xcalloc(1, config_dev_t); |
| 506 | pd->is_implied = true; |
| 507 | pd->dev_typep = &dev_type_ss_jbus; |
| 508 | pd->devp = (void*)procp; |
| 509 | procp->jbus_devp = pd; |
| 510 | |
| 511 | insert_domain_address(domainp, pd, 0x9f00000000LL, |
| 512 | 0x9f00000000LL+0x100000000LL); |
| 513 | |
| 514 | overlapp = insert_domain_device(domainp, pd); |
| 515 | if (overlapp != NULL) { |
| 516 | lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx", |
| 517 | overlapp->dev_typep->dev_type_namep, |
| 518 | overlapp->addrp->baseaddr, |
| 519 | pd->dev_typep->dev_type_namep, |
| 520 | pd->addrp->baseaddr); |
| 521 | } |
| 522 | |
| 523 | /* |
| 524 | * L2 Cache banks |
| 525 | */ |
| 526 | procp->num_l2banks = L2_BANKS; |
| 527 | procp->l2p = Xcalloc(1, ss_l2_cache_t); |
| 528 | |
| 529 | pd = Xcalloc(1, config_dev_t); |
| 530 | pd->is_implied = true; |
| 531 | pd->dev_typep = &dev_type_ss_l2_ctl; |
| 532 | pd->devp = (void*)procp; |
| 533 | procp->l2_ctl_devp = pd; |
| 534 | |
| 535 | insert_domain_address(domainp, pd, 0xA000000000LL, |
| 536 | 0xA000000000LL+0x1F00000000LL); |
| 537 | |
| 538 | overlapp = insert_domain_device(domainp, pd); |
| 539 | if (overlapp != NULL) { |
| 540 | lex_fatal("device \"%s\" @ 0x%llx overlaps with device \"%s\" @ 0x%llx", |
| 541 | overlapp->dev_typep->dev_type_namep, |
| 542 | overlapp->addrp->baseaddr, |
| 543 | pd->dev_typep->dev_type_namep, |
| 544 | pd->addrp->baseaddr); |
| 545 | } |
| 546 | |
| 547 | /* |
| 548 | * SSI |
| 549 | */ |
| 550 | procp->ssip = Xcalloc(1, ss_ssi_t); |
| 551 | |
| 552 | pd = Xcalloc(1, config_dev_t); |
| 553 | pd->is_implied = true; |
| 554 | pd->dev_typep = &dev_type_ss_ssi; |
| 555 | pd->devp = (void*)procp; |
| 556 | procp->ssi_devp = pd; |
| 557 | |
| 558 | insert_domain_address(domainp, pd, 0xff00000000LL, |
| 559 | 0xff00000000LL+0x10000000LL); |
| 560 | } |
| 561 | |
| 562 | |
| 563 | /* |
| 564 | * Basic module init |
| 565 | * |
| 566 | * Returns false if error initialising module, true if init was OK |
| 567 | */ |
| 568 | bool_t niagara_init_proc_type(proc_type_t * proctp) |
| 569 | { |
| 570 | if (proctp->flag_initialised) { |
| 571 | warning("Initialisation of module %s more than once - bailing", proctp->proc_type_namep); |
| 572 | return true; |
| 573 | } |
| 574 | |
| 575 | /* stuff here we only need to do once if we want to use this module */ |
| 576 | niagara_init_trap_list(); |
| 577 | |
| 578 | proctp->flag_initialised = true; |
| 579 | |
| 580 | return true; |
| 581 | } |
| 582 | |
| 583 | |
| 584 | /* |
| 585 | * We arrive here because: |
| 586 | * 1) a malformed (unaligned PC) |
| 587 | * 2) a TLB / icache miss |
| 588 | * 3) an x-cache miss |
| 589 | */ |
| 590 | |
| 591 | |
| 592 | void ss_xic_miss(simcpu_t * sp, xicache_line_t * xc_linep, tvaddr_t pc) |
| 593 | { |
| 594 | tvaddr_t va, tag; |
| 595 | tpaddr_t pa, pa_tag; |
| 596 | config_addr_t * cap; |
| 597 | tpaddr_t extent; |
| 598 | uint8_t * bufp; |
| 599 | sparcv9_cpu_t * v9p; |
| 600 | ss_strand_t * nsp; |
| 601 | ss_proc_t * npp; |
| 602 | uint_t context, bank; |
| 603 | error_conf_t * ep; |
| 604 | error_t * errorp; |
| 605 | |
| 606 | v9p = (sparcv9_cpu_t *)(sp->specificp); |
| 607 | nsp = v9p->impl_specificp; |
| 608 | npp = sp->config_procp->procp; |
| 609 | #if ERROR_INJECTION |
| 610 | errorp = sp->errorp; |
| 611 | #endif |
| 612 | |
| 613 | /* FIXME: need a current context variable, not a test here */ |
| 614 | context = (v9p->tl>0) ? SS_NUCLEUS_CONTEXT : nsp->pri_context; |
| 615 | |
| 616 | /* Quick check then for v9 bus error */ |
| 617 | |
| 618 | /* The PC always has bits 0 & 1 zero */ |
| 619 | ASSERT((pc & 0x3) == 0); |
| 620 | |
| 621 | /* align the pc to the start of the XC line */ |
| 622 | va = pc; |
| 623 | tag = va & XICACHE_TAG_PURE_MASK; |
| 624 | |
| 625 | /* |
| 626 | * Perform a virtual to physical translation |
| 627 | * so we can determine if we are dealing with |
| 628 | * a TLB miss or simply an x-cache miss. |
| 629 | */ |
| 630 | |
| 631 | /* Find the pa corresponding to the line we need */ |
| 632 | /* We assume that for SunSPARC, the TLB is off in Hyper priv mode */ |
| 633 | /* FIXME: we should probably do this by swizzling a function pointer */ |
| 634 | /* for this when we change mode, rather that having an if here ... fix later */ |
| 635 | |
| 636 | pa_tag = tag; |
| 637 | |
| 638 | if (v9p->pstate.addr_mask) { |
| 639 | pc &= MASK64(31,0); |
| 640 | pa_tag &= MASK64(31,0); |
| 641 | va &= MASK64(31,0); |
| 642 | /* NOTE: we dont mask tag ... we allow that to match the 64bit address */ |
| 643 | } |
| 644 | |
| 645 | pa = va; |
| 646 | if (!nsp->mmu_bypass) { |
| 647 | uint_t idx, partid; |
| 648 | ss_tlb_t * tlbp; |
| 649 | tlb_entry_t * tep; |
| 650 | uint_t flags; |
| 651 | ss_trap_type_t miss_trap_type; |
| 652 | uint_t miss_context; |
| 653 | |
| 654 | /* If MMU disabled, but we're in priv/user mode use real addresses */ |
| 655 | |
| 656 | if (!nsp->immu.enabled) { |
| 657 | context = SS_TLB_REAL_CONTEXT; |
| 658 | } |
| 659 | |
| 660 | /* |
| 661 | * check out of range address (if lie within the "VA hole") |
| 662 | */ |
| 663 | if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) { |
| 664 | niagara_set_sfsr(sp, &nsp->immu, va, |
| 665 | MMU_SFSR_FT_VARANGE, (v9p->tl>0) ? |
| 666 | ss_ctx_nucleus : ss_ctx_primary, 0/*fixme*/, 0, 0); |
| 667 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t) |
| 668 | SS_trap_instruction_access_exception); |
| 669 | return; |
| 670 | } |
| 671 | |
| 672 | tlbp = nsp->itlbp; |
| 673 | RW_rdlock(&tlbp->rwlock); |
| 674 | |
| 675 | partid = nsp->partid; |
| 676 | |
| 677 | /* FIXME: Need a better hash than this ! */ |
| 678 | idx = va >> SS_MAX_PAGE_SIZE_BITS; |
| 679 | idx += context + partid; |
| 680 | |
| 681 | idx &= SS_TLB_HASH_MASK; |
| 682 | |
| 683 | /* |
| 684 | * So we search for a matching page using the info we have in the |
| 685 | * hash - while another thread might possibly be removing or |
| 686 | * inserting an entry into the same table. |
| 687 | */ |
| 688 | |
| 689 | |
| 690 | for ( tep = tlbp->hash[idx].ptr; tep!=(tlb_entry_t*)0; tep = tep->nextp ) { |
| 691 | /* try and match the entry as appropriate */ |
| 692 | if (((tep->tag_pfn ^ va)>>tep->match_shift)==0 && tep->match_context==context && tep->partid == partid) goto tlb_match; |
| 693 | } |
| 694 | RW_unlock(&tlbp->rwlock); |
| 695 | |
| 696 | DBGMISS( lprintf(sp->gid, "itlb miss: pc=%lx va=%lx ctx=%x\n", pc, va, context); ); |
| 697 | /* |
| 698 | * If the MMU is "disabled" in privileged mode ... this is a real miss, not a |
| 699 | * virtual translation miss, so the fault context and trap type is different |
| 700 | */ |
| 701 | if (nsp->immu.enabled) { |
| 702 | miss_context = context; |
| 703 | miss_trap_type = SS_trap_fast_instruction_access_MMU_miss; |
| 704 | } else { |
| 705 | miss_context = 0; /* null for ra->pa miss undefined ? */ |
| 706 | miss_trap_type = SS_trap_instruction_real_translation_miss; |
| 707 | } |
| 708 | |
| 709 | VA48_WARNING(sp, va); |
| 710 | SET_ITLB_FAULT( nsp, va ); |
| 711 | nsp->immu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* FIXME: - do properly later */ |
| 712 | DBGMMU( lprintf(sp->gid, "IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); ); |
| 713 | MEMORY_ACCESS_TRAP(); |
| 714 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type); |
| 715 | return; |
| 716 | |
| 717 | tlb_match:; |
| 718 | flags = tep->flags; |
| 719 | pa += tep->pa_offset; |
| 720 | pa_tag += tep->pa_offset; |
| 721 | |
| 722 | RW_unlock(&tlbp->rwlock); |
| 723 | |
| 724 | /* |
| 725 | * Errors on itlb hit: stash table_entry pointer and if |
| 726 | * subsequent itlb hit on same entry post error again. |
| 727 | */ |
| 728 | #if ERROR_INJECTION |
| 729 | if (sp->error_check && (ep = find_errconf(sp, IFETCH, IMDU))) { |
| 730 | if (errorp->itep) { |
| 731 | DBGERR( lprintf(sp->gid, "ss_xic_miss(): " |
| 732 | " errorp->itep=%x, tep=%x\n", errorp->itep, tep); ); |
| 733 | if ((tlb_entry_t *)errorp->itep == tep) { |
| 734 | ss_error_condition(sp, ep); |
| 735 | return; |
| 736 | } |
| 737 | } else { |
| 738 | errorp->itep = tep; |
| 739 | ss_error_condition(sp, ep); |
| 740 | return; |
| 741 | } |
| 742 | } |
| 743 | #endif |
| 744 | |
| 745 | /* |
| 746 | * privilege test |
| 747 | */ |
| 748 | if ( (flags & SS_TLB_FLAG_PRIV) && v9p->state == V9_User) { |
| 749 | VA48_WARNING(sp, va); |
| 750 | SET_ITLB_FAULT( nsp, va ); |
| 751 | nsp->immu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | context; /* FIXME: - do properly later */ |
| 752 | DBGMMU( lprintf(sp->gid, "priv mapping, state==user: IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); ); |
| 753 | MEMORY_ACCESS_TRAP(); |
| 754 | niagara_set_sfsr(sp, &nsp->immu, va, MMU_SFSR_FT_PRIV, (v9p->tl>0) ? ss_ctx_nucleus : ss_ctx_primary, 0/*fixme*/, 0, 0); |
| 755 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_instruction_access_exception); |
| 756 | return; |
| 757 | } |
| 758 | |
| 759 | /* Niagara has no EXEC permission check for I fetches */ |
| 760 | } else { |
| 761 | /* Niagara only implements 40 bits of PA, the tlb code |
| 762 | masks PA so here we need to mask bypass PAs */ |
| 763 | pa &= MASK64(39,0); |
| 764 | } |
| 765 | |
| 766 | /* |
| 767 | * Now that we have the internal PA, map it to the real |
| 768 | * external PA before looking it up in the domain. |
| 769 | * This does not modify memory addresses, only JBus addresses. |
| 770 | */ |
| 771 | |
| 772 | if (pa >= 0x800e000000ull && pa < 0x8010000000ull) { |
| 773 | pa &= 0xffffffffull; |
| 774 | pa |= 0x40000000000ull; |
| 775 | } else if (pa >= 0x8010000000ull && pa < 0x8100000000ull) { |
| 776 | pa &= 0xffffffffull; |
| 777 | pa |= 0x60000000000ull; |
| 778 | } else if (pa >= 0xc000000000ull && pa < 0xff00000000ull) { |
| 779 | pa |= 0x70000000000ull; |
| 780 | } |
| 781 | |
| 782 | /* |
| 783 | * OK - now go get the instructions to fill in the xc-line |
| 784 | * ... start by finding the device that has the |
| 785 | * memory we need. |
| 786 | * optimise: by guessing at the last device found. |
| 787 | * |
| 788 | */ |
| 789 | |
| 790 | /* now find the device - looking in the cache first */ |
| 791 | |
| 792 | cap = sp->xic_miss_addrp; |
| 793 | |
| 794 | if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) { |
| 795 | domain_t * domainp; |
| 796 | config_proc_t * config_procp; |
| 797 | |
| 798 | config_procp = sp->config_procp; |
| 799 | domainp = config_procp->domainp; |
| 800 | |
| 801 | cap = find_domain_address(domainp, pa); |
| 802 | if (cap == NULL) { |
| 803 | /* OK it's a bus error there was no backing store */ |
| 804 | |
| 805 | fatal("bus error - instruction fetch from pc=0x%llx " |
| 806 | "(cacheline va=0x%llx -> physical 0x%llx)", pc, va, pa); /* FIXME */ |
| 807 | } |
| 808 | |
| 809 | sp->xic_miss_addrp = cap; /* cache for next time */ |
| 810 | } |
| 811 | |
| 812 | /* try and get the buffer pointer */ |
| 813 | |
| 814 | extent = cap->config_devp->dev_typep->dev_cacheable(cap, DA_Instn, pa_tag-cap->baseaddr, &bufp); |
| 815 | |
| 816 | if (extent < XICACHE_LINE_SIZE) { |
| 817 | /* bus error again ? or fill from multiple devices ? */ |
| 818 | fatal("fix bus error 2"); |
| 819 | /* FIXME */ |
| 820 | } |
| 821 | |
| 822 | /* |
| 823 | * Errors on ifetch to icache or L2 cache |
| 824 | * Make sure the L2 cache is enabled |
| 825 | */ |
| 826 | #if ERROR_INJECTION |
| 827 | if (sp->error_check == true && errorp->check_xicache) { |
| 828 | DBGERR( lprintf(sp->gid, "ss_xic_miss(): ifetch cache hit\n"); ); |
| 829 | |
| 830 | ep = find_errconf(sp, IFETCH, ITC|IDC|LDAC|LDAU|DAC|DAU); |
| 831 | |
| 832 | if (ep) |
| 833 | switch(ep->type) { |
| 834 | case ITC: |
| 835 | case IDC: |
| 836 | errorp->addr = pa; |
| 837 | ss_error_condition(sp, ep); |
| 838 | break; |
| 839 | |
| 840 | case LDAC: |
| 841 | case LDAU: |
| 842 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 843 | if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 844 | } |
| 845 | errorp->addr = pa; |
| 846 | ss_error_condition(sp, ep); |
| 847 | break; |
| 848 | |
| 849 | case DAC: |
| 850 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 851 | if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 852 | } |
| 853 | errorp->addr = pa; |
| 854 | ss_error_condition(sp, ep); |
| 855 | break; |
| 856 | |
| 857 | case DAU: |
| 858 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 859 | if (npp->l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 860 | } |
| 861 | errorp->addr = pa; |
| 862 | ss_error_condition(sp, ep); |
| 863 | break; |
| 864 | |
| 865 | l2_disabled: DBGERR( lprintf(sp->gid, "ss_xic_miss: No LDAC/LDAU Error" |
| 866 | " - L2 disabled\n"); ); |
| 867 | break; |
| 868 | default: |
| 869 | break; |
| 870 | } |
| 871 | } |
| 872 | #endif |
| 873 | |
| 874 | xc_linep->tag = tag | sp->tagstate; |
| 875 | xc_linep->memoryoffset = ((uint64_t)bufp)-tag; |
| 876 | |
| 877 | /* |
| 878 | * FIXME: If breakpoints are in use make sure we really clear the decoded line |
| 879 | * to ensure that we dont get instruction aliasing. XI-cache prob. needs a re-design |
| 880 | * from this standpoint - but this will wait until we complete the JIT version. |
| 881 | * Until then this is a reminder and a place holder. |
| 882 | */ |
| 883 | if (sp->bp_infop) xicache_clobber_line_decodes(sp, tag); |
| 884 | #if 0 /* { */ |
| 885 | xicache_line_fill_risc4(sp, xc_linep, tag, bufp); |
| 886 | #endif /* } */ |
| 887 | } |
| 888 | |
| 889 | |
| 890 | /* |
| 891 | * This is not the worlds most efficient routine, but then we assume that ASI's are |
| 892 | * not frequently occurring memory access types - we may have to fast path the |
| 893 | * ASI_AS_IF_USER_PRIMARY etc. some how if used frequently by kernel b-copy. |
| 894 | */ |
| 895 | |
| 896 | |
| 897 | void |
| 898 | ss_asi_access(simcpu_t * sp, maccess_t op, uint_t regnum, uint_t asi, |
| 899 | uint64_t reg1, uint64_t reg2, asi_flag_t asi_flag) |
| 900 | { |
| 901 | sparcv9_cpu_t * v9p; |
| 902 | ss_strand_t * nsp; |
| 903 | ss_proc_t *npp; |
| 904 | uint64_t val; |
| 905 | ss_tsb_info_t * tsbinfop, * tsbinfop1; |
| 906 | ss_mmu_t * mmup; |
| 907 | ss_tlb_t * tlbp; |
| 908 | bool_t is_load; |
| 909 | uint_t size, mask; |
| 910 | uint_t context_type, idx; |
| 911 | tvaddr_t addr; |
| 912 | mem_flags_t mflags; |
| 913 | bool_t is_real; |
| 914 | sparcv9_trap_type_t tt; |
| 915 | error_conf_t * ep; |
| 916 | |
| 917 | v9p = (sparcv9_cpu_t *)(sp->specificp); |
| 918 | nsp = v9p->impl_specificp; |
| 919 | npp = (ss_proc_t *)(sp->config_procp->procp); |
| 920 | |
| 921 | ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]); |
| 922 | |
| 923 | if (asi == V9_ASI_IMPLICIT) |
| 924 | goto no_asi_valid_checks; |
| 925 | |
| 926 | /* |
| 927 | * First check if this is a legitimate ASI based |
| 928 | * on current privilege level. |
| 929 | */ |
| 930 | |
| 931 | switch( v9p->state ) { |
| 932 | case V9_User: |
| 933 | ASSERT( !v9p->pstate.priv && !v9p->hpstate.hpriv ); |
| 934 | if (asi<0x80) { |
| 935 | addr = ((op & MA_Op_Mask) == MA_CAS) ? |
| 936 | reg1 : (reg1 + reg2); |
| 937 | niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_nucleus/*checkme*/, asi, 0/*fixme*/, 0); |
| 938 | v9p->post_precise_trap(sp, Sparcv9_trap_privileged_action); |
| 939 | return; |
| 940 | } |
| 941 | break; |
| 942 | case V9_Priv: |
| 943 | ASSERT( v9p->pstate.priv && !v9p->hpstate.hpriv ); |
| 944 | if (asi>=0x30 && asi<0x80) { |
| 945 | /* ASIs reserved for hpriv mode appear to priv mode as data access exceptions */ |
| 946 | MEMORY_ACCESS_TRAP(); |
| 947 | addr = ((op & MA_Op_Mask) == MA_CAS) ? |
| 948 | reg1 : (reg1 + reg2); |
| 949 | niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_nucleus/*checkme*/, asi, 0/*fixme*/, 0); |
| 950 | v9p->post_precise_trap(sp, Sparcv9_trap_data_access_exception); |
| 951 | return; |
| 952 | } |
| 953 | break; |
| 954 | case V9_HyperPriv: |
| 955 | ASSERT( v9p->hpstate.hpriv ); |
| 956 | break; |
| 957 | case V9_RED: |
| 958 | ASSERT( v9p->hpstate.red ); |
| 959 | break; |
| 960 | default: |
| 961 | abort(); |
| 962 | } |
| 963 | |
| 964 | no_asi_valid_checks:; |
| 965 | |
| 966 | /* |
| 967 | * Next pull out all the memory access ASIs ... |
| 968 | */ |
| 969 | |
| 970 | mflags = (V9_User != v9p->state) ? MF_Has_Priv : 0; |
| 971 | context_type = ss_ctx_reserved; |
| 972 | mask = (1<<(op & MA_Size_Mask))-1; |
| 973 | |
| 974 | switch(asi) { |
| 975 | case V9_ASI_IMPLICIT: |
| 976 | if (v9p->tl > 0) { |
| 977 | asi = (v9p->pstate.cle) ? SS_ASI_NUCLEUS_LITTLE : SS_ASI_NUCLEUS; |
| 978 | goto ss_asi_nucleus; |
| 979 | } |
| 980 | asi = (v9p->pstate.cle) ? SS_ASI_PRIMARY_LITTLE : SS_ASI_PRIMARY; |
| 981 | goto ss_asi_primary; |
| 982 | |
| 983 | case SS_ASI_NUCLEUS_LITTLE: |
| 984 | case SS_ASI_NUCLEUS: |
| 985 | ss_asi_nucleus:; |
| 986 | asi_nuc:; |
| 987 | context_type = ss_ctx_nucleus; |
| 988 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 989 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; |
| 990 | goto memory_access; |
| 991 | |
| 992 | case SS_ASI_PRIMARY_NO_FAULT_LITTLE: |
| 993 | case SS_ASI_PRIMARY_NO_FAULT: |
| 994 | if (IS_V9_MA_STORE(op & MA_Op_Mask)) |
| 995 | goto data_access_exception; |
| 996 | mflags |= MF_No_Fault; |
| 997 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 998 | goto asi_prim; |
| 999 | |
| 1000 | case SS_ASI_AS_IF_USER_PRIMARY_LITTLE: |
| 1001 | case SS_ASI_AS_IF_USER_PRIMARY: |
| 1002 | mflags &= ~MF_Has_Priv; |
| 1003 | goto asi_prim; |
| 1004 | |
| 1005 | case SS_ASI_PRIMARY_LITTLE: /* (88) RW Implicit Primary Address space (LE) */ |
| 1006 | case SS_ASI_PRIMARY: /* (80) RW Implicit Primary Address space */ |
| 1007 | ss_asi_primary:; |
| 1008 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1009 | asi_prim:; |
| 1010 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; |
| 1011 | context_type = ss_ctx_primary; |
| 1012 | goto memory_access; |
| 1013 | |
| 1014 | case SS_ASI_SECONDARY_NO_FAULT_LITTLE: |
| 1015 | case SS_ASI_SECONDARY_NO_FAULT: |
| 1016 | if (IS_V9_MA_STORE(op & MA_Op_Mask)) |
| 1017 | goto data_access_exception; |
| 1018 | mflags |= MF_No_Fault; |
| 1019 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1020 | goto asi_sec; |
| 1021 | |
| 1022 | case SS_ASI_AS_IF_USER_SECONDARY_LITTLE: |
| 1023 | case SS_ASI_AS_IF_USER_SECONDARY: |
| 1024 | mflags &= ~MF_Has_Priv; |
| 1025 | goto asi_sec; |
| 1026 | |
| 1027 | case SS_ASI_SECONDARY_LITTLE: |
| 1028 | case SS_ASI_SECONDARY: |
| 1029 | asi_sec:; |
| 1030 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; |
| 1031 | context_type = ss_ctx_secondary; |
| 1032 | goto memory_access; |
| 1033 | |
| 1034 | case SS_ASI_REAL_IO_LITTLE: /* (1D) RW Same as ASI_PHYS_USE_EC_LITTLE for memory |
| 1035 | addresses. For IO addresses, physical address, |
| 1036 | non-cacheable, with side-effect (LE) */ |
| 1037 | case SS_ASI_REAL_IO: /* (15) RW Same as ASI_PHYS_USE_EC for memory addresses. |
| 1038 | For IO addresses, physical address, non-cacheable, |
| 1039 | with side-effect */ |
| 1040 | mflags |= MF_IO_Access; |
| 1041 | mflags |= MF_TLB_Real_Ctx; |
| 1042 | context_type = ss_ctx_nucleus; |
| 1043 | goto memory_access; |
| 1044 | |
| 1045 | case SS_ASI_REAL_MEM_LITTLE: /* (1C) RW physical address, non-allocating in L1 cache */ |
| 1046 | case SS_ASI_REAL_MEM: /* (14) RW physical address, non-allocating in L1 cache */ |
| 1047 | mflags |= MF_TLB_Real_Ctx; |
| 1048 | if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access; |
| 1049 | context_type = ss_ctx_nucleus; |
| 1050 | goto memory_access; |
| 1051 | |
| 1052 | case SS_ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE: /* RW 64B block load/store, primary address space, user privilege (LE) */ |
| 1053 | case SS_ASI_BLOCK_AS_IF_USER_PRIMARY: /* RW 64B block load/store, primary address space, user privilege */ |
| 1054 | mflags &= ~MF_Has_Priv; |
| 1055 | goto asi_blk_prim; |
| 1056 | |
| 1057 | case SS_ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE: /* RW 64B block load/store, secondary address space, user privilege (LE) */ |
| 1058 | case SS_ASI_BLOCK_AS_IF_USER_SECONDARY: /* RW 64B block load/store, secondary address space, user privilege */ |
| 1059 | mflags &= ~MF_Has_Priv; |
| 1060 | goto asi_blk_sec; |
| 1061 | |
| 1062 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address, user priv (LE) */ |
| 1063 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P: /* Block initializing store/128b atomic LDDA, primary address, user privilege */ |
| 1064 | mflags &= ~MF_Has_Priv; |
| 1065 | goto blk_init_prim; |
| 1066 | |
| 1067 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store, secondary address, user privilege (LE) */ |
| 1068 | case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S: /* Block initializing store/128b atomic LDDA, secondary address, user privilege */ |
| 1069 | mflags &= ~MF_Has_Priv; |
| 1070 | goto blk_init_sec; |
| 1071 | |
| 1072 | case SS_ASI_QUAD_LDD_LITTLE: /* 128b atomic LDDA (LE) */ |
| 1073 | case SS_ASI_QUAD_LDD: /* 128b atomic LDDA */ |
| 1074 | /* This ASI must be used with an LDDA instruction */ |
| 1075 | if (MA_lddu64 != op) { |
| 1076 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1077 | return; |
| 1078 | } |
| 1079 | /* Adjust size to 128bytes so alignment is correct */ |
| 1080 | op = MA_lddu128; |
| 1081 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1082 | mflags |= MF_Atomic_Access; |
| 1083 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1084 | context_type = ss_ctx_nucleus; |
| 1085 | goto memory_access; |
| 1086 | |
| 1087 | case SS_ASI_QUAD_LDD_REAL_LITTLE: /* 128b atomic LDDA, real address (LE) */ |
| 1088 | case SS_ASI_QUAD_LDD_REAL: /* 128b atomic LDDA, real address */ |
| 1089 | /* This ASI must be used with an LDDA instruction */ |
| 1090 | if (MA_lddu64 != op) { |
| 1091 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1092 | return; |
| 1093 | } |
| 1094 | /* Adjust size to 128bytes so alignment is correct */ |
| 1095 | op = MA_lddu128; |
| 1096 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1097 | mflags |= MF_Atomic_Access; |
| 1098 | mflags |= MF_TLB_Real_Ctx; |
| 1099 | context_type = ss_ctx_nucleus; |
| 1100 | goto memory_access; |
| 1101 | |
| 1102 | case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD_LITTLE: /* Block initializing store/128b atomic LDDA (LE) */ |
| 1103 | case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD: /* Block initializing store/128b atomic LDDA */ |
| 1104 | if (MA_lddu64 == op) { |
| 1105 | op = MA_lddu128; |
| 1106 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1107 | mflags |= MF_Atomic_Access; |
| 1108 | goto asi_nuc; |
| 1109 | } else |
| 1110 | if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) { |
| 1111 | /* block init effect */ |
| 1112 | addr = ((op & MA_Op_Mask) == MA_CAS) ? |
| 1113 | reg1 : (reg1 + reg2); |
| 1114 | if ((addr & 0x3f) == 0) |
| 1115 | mflags |= MF_Blk_Init; |
| 1116 | goto asi_nuc; |
| 1117 | |
| 1118 | } |
| 1119 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1120 | return; |
| 1121 | |
| 1122 | case SS_ASI_QUAD_LDD_PHYS_LITTLE: /* 128b atomic LDDA, physical address (LE) */ |
| 1123 | case SS_ASI_QUAD_LDD_PHYS: /* N1 PRM rev 1.4: any type of access causes data_access_exception */ |
| 1124 | goto data_access_exception; |
| 1125 | |
| 1126 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address (LE) */ |
| 1127 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_P: /* Block initializing store/128b atomic LDDA, primary address */ |
| 1128 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1129 | blk_init_prim:; |
| 1130 | if (MA_lddu64 == op) { |
| 1131 | op = MA_lddu128; |
| 1132 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1133 | mflags |= MF_Atomic_Access; |
| 1134 | goto asi_prim; |
| 1135 | } else |
| 1136 | if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) { |
| 1137 | /* block init effect */ |
| 1138 | addr = ((op & MA_Op_Mask) == MA_CAS) ? |
| 1139 | reg1 : (reg1 + reg2); |
| 1140 | if ((addr & 0x3f) == 0) |
| 1141 | mflags |= MF_Blk_Init; |
| 1142 | goto asi_prim; |
| 1143 | |
| 1144 | } |
| 1145 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1146 | return; |
| 1147 | |
| 1148 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store/128b atomic LDDA, secondary address (LE) */ |
| 1149 | case SS_ASI_BLK_INIT_ST_QUAD_LDD_S: /* Block initializing store/128b atomic LDDA, secondary address */ |
| 1150 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1151 | blk_init_sec:; |
| 1152 | if (MA_lddu64 == op) { |
| 1153 | op = MA_lddu128; |
| 1154 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1155 | mflags |= MF_Atomic_Access; |
| 1156 | goto asi_sec; |
| 1157 | } else |
| 1158 | if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) { |
| 1159 | /* block init effect */ |
| 1160 | addr = ((op & MA_Op_Mask) == MA_CAS) ? |
| 1161 | reg1 : (reg1 + reg2); |
| 1162 | if ((addr & 0x3f) == 0) |
| 1163 | mflags |= MF_Blk_Init; |
| 1164 | goto asi_sec; |
| 1165 | |
| 1166 | } |
| 1167 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1168 | return; |
| 1169 | |
| 1170 | case SS_ASI_BLK_PL: /* 64B block load/store, primary address (LE) */ |
| 1171 | case SS_ASI_BLK_P: /* 64B block load/store, primary address */ |
| 1172 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1173 | asi_blk_prim:; |
| 1174 | /* This ASI must be used with an LDDFA/STDFA instruction */ |
| 1175 | if (!(MA_ldfp64 == op || MA_stfp64 == op) || |
| 1176 | ((regnum & 0xf) != 0)) { |
| 1177 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1178 | return; |
| 1179 | } |
| 1180 | op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) : |
| 1181 | (MA_Size512 | MA_StFloat); |
| 1182 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1183 | mflags |= MF_Atomic_Access; |
| 1184 | goto asi_prim; |
| 1185 | |
| 1186 | case SS_ASI_BLK_SL: /* 64B block load/store, secondary address (LE) */ |
| 1187 | case SS_ASI_BLK_S: /* 64B block load/store, secondary address */ |
| 1188 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1189 | asi_blk_sec:; |
| 1190 | /* This ASI must be used with an LDDFA/STDFA instruction */ |
| 1191 | if (!(MA_ldfp64 == op || MA_stfp64 == op) || |
| 1192 | ((regnum & 0xf) != 0)) { |
| 1193 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 1194 | return; |
| 1195 | } |
| 1196 | op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) : |
| 1197 | (MA_Size512 | MA_StFloat); |
| 1198 | mask = (1<<(op & MA_Size_Mask))-1; |
| 1199 | mflags |= MF_Atomic_Access; |
| 1200 | goto asi_sec; |
| 1201 | |
| 1202 | case SS_ASI_PST8_PL: |
| 1203 | case SS_ASI_PST8_P: |
| 1204 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1205 | /* This ASI must be used with STDFA instruction */ |
| 1206 | if (!(MA_stfp64 == op)) { |
| 1207 | goto data_access_exception; |
| 1208 | } |
| 1209 | goto partial_asi_unsupported; |
| 1210 | |
| 1211 | case SS_ASI_PST8_SL: |
| 1212 | case SS_ASI_PST8_S: |
| 1213 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1214 | /* This ASI must be used with STDFA instruction */ |
| 1215 | if (!(MA_stfp64 == op)) { |
| 1216 | goto data_access_exception; |
| 1217 | } |
| 1218 | goto partial_asi_unsupported; |
| 1219 | |
| 1220 | case SS_ASI_PST16_PL: |
| 1221 | case SS_ASI_PST16_P: |
| 1222 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1223 | /* This ASI must be used with STDFA instruction */ |
| 1224 | if (!(MA_stfp64 == op)) { |
| 1225 | goto data_access_exception; |
| 1226 | } |
| 1227 | goto partial_asi_unsupported; |
| 1228 | |
| 1229 | case SS_ASI_PST16_SL: |
| 1230 | case SS_ASI_PST16_S: |
| 1231 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1232 | /* This ASI must be used with STDFA instruction */ |
| 1233 | if (!(MA_stfp64 == op)) { |
| 1234 | goto data_access_exception; |
| 1235 | } |
| 1236 | goto partial_asi_unsupported; |
| 1237 | |
| 1238 | case SS_ASI_PST32_PL: |
| 1239 | case SS_ASI_PST32_P: |
| 1240 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1241 | /* This ASI must be used with STDFA instruction */ |
| 1242 | if (!(MA_stfp64 == op)) { |
| 1243 | goto data_access_exception; |
| 1244 | } |
| 1245 | goto partial_asi_unsupported; |
| 1246 | |
| 1247 | case SS_ASI_PST32_SL: |
| 1248 | case SS_ASI_PST32_S: |
| 1249 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1250 | /* This ASI must be used with STDFA instruction */ |
| 1251 | if (!(MA_stfp64 == op)) { |
| 1252 | goto data_access_exception; |
| 1253 | } |
| 1254 | goto partial_asi_unsupported; |
| 1255 | |
| 1256 | case SS_ASI_FL8_PL: |
| 1257 | case SS_ASI_FL8_P: |
| 1258 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1259 | /* This ASI must be used with an LDDFA/STDFA instruction */ |
| 1260 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { |
| 1261 | goto data_access_exception; |
| 1262 | } |
| 1263 | goto partial_asi_unsupported; |
| 1264 | |
| 1265 | case SS_ASI_FL8_SL: |
| 1266 | case SS_ASI_FL8_S: |
| 1267 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1268 | /* This ASI must be used with an LDDFA/STDFA instruction */ |
| 1269 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { |
| 1270 | goto data_access_exception; |
| 1271 | } |
| 1272 | goto partial_asi_unsupported; |
| 1273 | |
| 1274 | case SS_ASI_FL16_PL: |
| 1275 | case SS_ASI_FL16_P: |
| 1276 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1277 | /* This ASI must be used with an LDDFA/STDFA instruction */ |
| 1278 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { |
| 1279 | goto data_access_exception; |
| 1280 | } |
| 1281 | goto partial_asi_unsupported; |
| 1282 | |
| 1283 | case SS_ASI_FL16_SL: |
| 1284 | case SS_ASI_FL16_S: |
| 1285 | if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass; |
| 1286 | /* This ASI must be used with an LDDFA/STDFA instruction */ |
| 1287 | if (!(MA_ldfp64 == op || MA_stfp64 == op)) { |
| 1288 | goto data_access_exception; |
| 1289 | } |
| 1290 | partial_asi_unsupported:; |
| 1291 | addr = reg1 + reg2; |
| 1292 | if (addr & 0x3) { /* check 32bit alignment */ |
| 1293 | v9p->post_precise_trap(sp, Sparcv9_trap_mem_address_not_aligned); |
| 1294 | return; |
| 1295 | } |
| 1296 | if (addr & 0x7) { /* check 64bit alignment */ |
| 1297 | if (IS_V9_MA_LOAD(op & MA_Op_Mask)) |
| 1298 | v9p->post_precise_trap(sp, Sparcv9_trap_LDDF_mem_address_not_aligned); |
| 1299 | else |
| 1300 | v9p->post_precise_trap(sp, Sparcv9_trap_STDF_mem_address_not_aligned); |
| 1301 | return; |
| 1302 | } |
| 1303 | goto data_access_exception; |
| 1304 | |
| 1305 | case SS_ASI_BLK_COMMIT_P: |
| 1306 | case SS_ASI_BLK_COMMIT_S: |
| 1307 | /* TODO: PRM states alignment checks done. */ |
| 1308 | goto data_access_exception; |
| 1309 | |
| 1310 | memory_access:; |
| 1311 | if ((MA_LdFloat == (op & MA_Op_Mask)) || (MA_StFloat == (op & MA_Op_Mask)) ) { |
| 1312 | ss_memory_asi_access(sp, op, (uint64_t *)&(sp->fpreg.s32[regnum]), mflags, asi, context_type, mask, reg1, reg2); |
| 1313 | } else { |
| 1314 | ss_memory_asi_access(sp, op, &(sp->intreg[regnum]), mflags, asi, context_type, mask, reg1, reg2); |
| 1315 | ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]); |
| 1316 | } |
| 1317 | return; |
| 1318 | |
| 1319 | default: |
| 1320 | break; |
| 1321 | } |
| 1322 | |
| 1323 | |
| 1324 | |
| 1325 | /* OK, derive access address etc. */ |
| 1326 | |
| 1327 | size = op & MA_Size_Mask; |
| 1328 | op &= MA_Op_Mask; |
| 1329 | is_load = IS_V9_MA_LOAD(op); |
| 1330 | |
| 1331 | /* No MA_CAS case required for cpu state registers. */ |
| 1332 | addr = reg1 + reg2; |
| 1333 | |
| 1334 | |
| 1335 | /* |
| 1336 | * Finally all the cpu state registers ... |
| 1337 | * Currently only 64bit accesses supported .. |
| 1338 | * need to ascertain exactly what niagara does here ! FIXME |
| 1339 | * FIXME: Of course all the alt address space accesses are different here ! |
| 1340 | */ |
| 1341 | |
| 1342 | if (size != MA_Size64 || (addr&0x7)!=0 || IS_V9_MA_ATOMIC(op)) |
| 1343 | goto data_access_exception; |
| 1344 | |
| 1345 | ASSERT(MA_LdSigned != op); /* not signed for any stxas or for ldxas */ |
| 1346 | |
| 1347 | #define ITODO(s) do { \ |
| 1348 | IMPL_WARNING(("Unimplemented niagara asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc)); \ |
| 1349 | if (is_load) { val = 0; goto load_complete; }\ |
| 1350 | } while (0) |
| 1351 | |
| 1352 | /* If we're storing fetch the value to stuff */ |
| 1353 | if (!is_load) { |
| 1354 | if (op == MA_St) { |
| 1355 | val = sp->intreg[regnum]; |
| 1356 | } else { /* MA_StFloat */ |
| 1357 | switch(size) { |
| 1358 | case MA_Size32: |
| 1359 | val = sp->fpreg.s32[regnum]; |
| 1360 | break; |
| 1361 | case MA_Size64: |
| 1362 | val = sp->fpreg.s64[regnum >> 1]; |
| 1363 | break; |
| 1364 | default: |
| 1365 | goto unimplemented; |
| 1366 | } |
| 1367 | } |
| 1368 | }; |
| 1369 | /* Hex Access VA Repli- DESCRIPTION */ |
| 1370 | /* (hex) cated */ |
| 1371 | switch(asi) { |
| 1372 | |
| 1373 | /* MANDATORY SPARC V9 ASIs */ |
| 1374 | |
| 1375 | /* All in the memory section above */ |
| 1376 | |
| 1377 | /* SunSPARC EXTENDED (non-V9) ASIs */ |
| 1378 | |
| 1379 | case SS_ASI_SCRATCHPAD: |
| 1380 | /* |
| 1381 | * 0x20 RW 0-18 Y Scratchpad Registers |
| 1382 | * 0x20 - 20-28 N any type of access causes data_access_exception |
| 1383 | * 0x20 RW 30-38 Y Scratchpad Registers |
| 1384 | */ |
| 1385 | |
| 1386 | if (INVALID_SCRATCHPAD(addr)) { |
| 1387 | goto data_access_exception; |
| 1388 | } else { |
| 1389 | uint64_t * valp = |
| 1390 | &(nsp->strand_reg[SSR_ScratchPad0 + (addr>>3)]); |
| 1391 | if (is_load) { |
| 1392 | val = *valp; |
| 1393 | goto load_complete; |
| 1394 | } |
| 1395 | DBGSCRATCH( if (*valp != val) |
| 1396 | lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: " |
| 1397 | "0x%llx -> 0x%llx pc=0x%llx\n", |
| 1398 | asi, addr, *valp, val, sp->pc); ); |
| 1399 | *valp = val; |
| 1400 | } |
| 1401 | break; |
| 1402 | |
| 1403 | case SS_ASI_MMU: |
| 1404 | /* Niagara 1: |
| 1405 | * 0x21 RW 8 Y I/DMMU Primary Context Register |
| 1406 | * 0x21 RW 10 Y DMMU Secondary Context Register |
| 1407 | * 0x21 RW 120 Y I/DMMU Synchronous Fault Pointer |
| 1408 | * Niagara 2: |
| 1409 | * 0x21 RW 108 Y I/DMMU Primary Context Register 1 |
| 1410 | * 0x21 RW 110 Y DMMU Secondary Context Register 1 |
| 1411 | */ |
| 1412 | if (is_load) { |
| 1413 | switch(addr) { |
| 1414 | case 0x08: |
| 1415 | val = (uint64_t)(nsp->pri_context); |
| 1416 | goto load_complete; |
| 1417 | case 0x10: |
| 1418 | val = (uint64_t)(nsp->sec_context); |
| 1419 | goto load_complete; |
| 1420 | default: |
| 1421 | break; |
| 1422 | } |
| 1423 | goto data_access_exception; |
| 1424 | } else { |
| 1425 | /* |
| 1426 | * Since we're changing a context register we should |
| 1427 | * flush the xi and xd trans caches. However, this only matters |
| 1428 | * for the primary context - iff we are in priv mode with |
| 1429 | * TL=0. For all other cases (TL>0) or hpriv=1, either the |
| 1430 | * MMU is not in use, or we're executing the nucleus context so |
| 1431 | * we can rely on a done/retry instn / mode change to do the flush for us |
| 1432 | * when we change mode later. |
| 1433 | */ |
| 1434 | DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1435 | switch(addr) { |
| 1436 | case 0x08: |
| 1437 | val &= MASK64(12,0); |
| 1438 | if (nsp->pri_context!=val) { |
| 1439 | sp->xicache_trans_flush_pending = true; |
| 1440 | sp->xdcache_trans_flush_pending = true; |
| 1441 | xcache_set_tagstate(sp); |
| 1442 | } |
| 1443 | nsp->pri_context = val; |
| 1444 | break; |
| 1445 | case 0x10: |
| 1446 | nsp->sec_context = val & MASK64(12,0); |
| 1447 | break; |
| 1448 | default: |
| 1449 | goto data_access_exception; |
| 1450 | } |
| 1451 | } |
| 1452 | break; |
| 1453 | |
| 1454 | case SS_ASI_QUEUE: /* 0x25 RW 3C0 Y CPU Mondo Queue Head Pointer */ |
| 1455 | /* 0x25 RW 3C8 Y CPU Mondo Queue Tail Pointer */ |
| 1456 | /* 0x25 RW 3D0 Y Device Mondo Queue Head Pointer */ |
| 1457 | /* 0x25 RW 3D8 Y Device Mondo Queue Tail Pointer */ |
| 1458 | /* 0x25 RW 3E0 Y Resumable Error Queue Head Pointer */ |
| 1459 | /* 0x25 RW 3E8 Y Resumable Error Queue Tail Pointer */ |
| 1460 | /* 0x25 RW 3F0 Y Nonresumable Error Queue Head Pointer */ |
| 1461 | /* 0x25 RW 3F8 Y Nonresumable Error Queue Tail Pointer */ |
| 1462 | /* |
| 1463 | * According to the PRM (1.8 Table 9-3), Niagara will |
| 1464 | * 'nop' loads or stores to addresses 0-0x3b8. |
| 1465 | */ |
| 1466 | if (is_load) { |
| 1467 | |
| 1468 | switch(addr) { |
| 1469 | case 0x3c0: |
| 1470 | case 0x3d0: |
| 1471 | case 0x3e0: |
| 1472 | case 0x3f0: |
| 1473 | val = (uint16_t)(nsp->nqueue[ (addr>>4) - 0x3c].head); |
| 1474 | break; |
| 1475 | case 0x3c8: |
| 1476 | case 0x3d8: |
| 1477 | case 0x3e8: |
| 1478 | case 0x3f8: |
| 1479 | val = (uint16_t)(nsp->nqueue[(addr>>4) - 0x3c].tail); |
| 1480 | break; |
| 1481 | default: |
| 1482 | goto data_access_exception; |
| 1483 | } |
| 1484 | DBGMONDO( lprintf(sp->gid, "ASI_QUEUE load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1485 | goto load_complete; |
| 1486 | } else { |
| 1487 | DBGMONDO( lprintf(sp->gid, "ASI_QUEUE store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1488 | RSVD_MASK(sp, MASK64(13, 6), val, asi, addr); |
| 1489 | switch(addr) { |
| 1490 | case 0x3c0: |
| 1491 | case 0x3d0: |
| 1492 | case 0x3e0: |
| 1493 | case 0x3f0: |
| 1494 | nsp->nqueue[(addr>>4) - 0x3c].head = (uint16_t)val; |
| 1495 | nsp->flag_queue_irq[(addr>>4)- 0x3c] = nsp->nqueue[(addr>>4) - 0x3c].head != nsp->nqueue[(addr>>4) - 0x3c].tail; |
| 1496 | break; |
| 1497 | case 0x3c8: |
| 1498 | case 0x3d8: |
| 1499 | case 0x3e8: |
| 1500 | case 0x3f8: |
| 1501 | if (v9p->state != V9_HyperPriv && |
| 1502 | v9p->state != V9_RED) |
| 1503 | goto data_access_exception; /* DAX if store to tail in privileged mode */ |
| 1504 | nsp->nqueue[(addr>>4) - 0x3c].tail = (uint16_t)val; |
| 1505 | nsp->flag_queue_irq[(addr>>4)- 0x3c] = nsp->nqueue[(addr>>4) - 0x3c].head != nsp->nqueue[(addr>>4) - 0x3c].tail; |
| 1506 | break; |
| 1507 | default: |
| 1508 | goto data_access_exception; |
| 1509 | } |
| 1510 | ss_check_interrupts(sp); |
| 1511 | } |
| 1512 | break; |
| 1513 | |
| 1514 | case SS_ASI_DIRECT_MAP_ECACHE: /* 0x30 - - - N1 PRM rev 1.4: any type of access causes data_access_exception */ |
| 1515 | goto data_access_exception; |
| 1516 | |
| 1517 | case SS_ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0: /* 0x31 RW 0 Y DMMU Context Zero TSB Base PS0 */ |
| 1518 | tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps0); |
| 1519 | goto mmu_tsb_base; |
| 1520 | |
| 1521 | case SS_ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1: /* 0x32 RW 0 Y DMMU Context Zero TSB Base PS1 */ |
| 1522 | tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps1); |
| 1523 | goto mmu_tsb_base; |
| 1524 | |
| 1525 | case SS_ASI_DMMU_CTXT_ZERO_CONFIG: /* 0x33 RW 0 Y DMMU Context Zero Config Register */ |
| 1526 | tsbinfop = &(nsp->dmmu_ctxt_zero_tsb_ps0); |
| 1527 | tsbinfop1 = &(nsp->dmmu_ctxt_zero_tsb_ps1); |
| 1528 | goto mmu_tsb_config; |
| 1529 | |
| 1530 | case SS_ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0: /* 0x35 RW 0 Y IMMU Context Zero TSB Base PS0 */ |
| 1531 | tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps0); |
| 1532 | mmu_tsb_base:; |
| 1533 | if (is_load) { |
| 1534 | val = tsbinfop->reg_tsb_base; |
| 1535 | DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1536 | goto load_complete; |
| 1537 | } else { |
| 1538 | uint_t tsb_size; |
| 1539 | bool_t is_split; |
| 1540 | |
| 1541 | DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1542 | tsbinfop->reg_tsb_base = val; |
| 1543 | |
| 1544 | tsb_size = val & MASK64( 3, 0 ); |
| 1545 | is_split = ((val >> 12)&1) ? true : false; |
| 1546 | |
| 1547 | /* niagara catches attempts to create TSB spans larger than */ |
| 1548 | /* legal VA span */ |
| 1549 | if (tsb_size >= 11 && tsbinfop->page_size == 5) goto data_access_exception; |
| 1550 | |
| 1551 | tsbinfop->is_split = is_split; |
| 1552 | tsbinfop->tsb_size = tsb_size; |
| 1553 | tsbinfop->base_addr = val & ( is_split ? MASK64(63,14+tsb_size) : MASK64(63,13+tsb_size) ); |
| 1554 | } |
| 1555 | break; |
| 1556 | |
| 1557 | case SS_ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1: /* 0x36 RW 0 Y IMMU Context Zero TSB Base PS1 */ |
| 1558 | tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps1); |
| 1559 | goto mmu_tsb_base; |
| 1560 | |
| 1561 | case SS_ASI_IMMU_CTXT_ZERO_CONFIG: /* 0x37 RW 0 Y DMMU Context Zero Config Register */ |
| 1562 | tsbinfop = &(nsp->immu_ctxt_zero_tsb_ps0); |
| 1563 | tsbinfop1 = &(nsp->immu_ctxt_zero_tsb_ps1); |
| 1564 | mmu_tsb_config:; |
| 1565 | /* FIXME: what about non VA=0x0 accesses ? what about if new page-size + tsb size > span faults ? */ |
| 1566 | if (is_load) { |
| 1567 | val = ((uint64_t)tsbinfop1->page_size << 8) | ((uint64_t)tsbinfop->page_size); |
| 1568 | DBGMMU( lprintf(sp->gid, "MMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1569 | goto load_complete; |
| 1570 | } else { |
| 1571 | static uint8_t supported[8]={ 1, 1, 0, 1, 0, 1, 0, 0 }; |
| 1572 | DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 1573 | tsbinfop1->page_size = (val >> 8) & 0x7; |
| 1574 | if (!supported[tsbinfop1->page_size]) |
| 1575 | tsbinfop1->page_size = 5; |
| 1576 | tsbinfop->page_size = val & 0x7; |
| 1577 | if (!supported[tsbinfop->page_size]) |
| 1578 | tsbinfop->page_size = 5; |
| 1579 | } |
| 1580 | break; |
| 1581 | |
| 1582 | case SS_ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0: /* 0x39 RW 0 Y DMMU Context Nonzero TSB Base PS0 */ |
| 1583 | tsbinfop = &(nsp->dmmu_ctxt_nonzero_tsb_ps0); |
| 1584 | goto mmu_tsb_base; |
| 1585 | |
| 1586 | case SS_ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1: /* 0x3A RW 0 Y DMMU Context Nonzero TSB Base PS1 */ |
| 1587 | tsbinfop = &(nsp->dmmu_ctxt_nonzero_tsb_ps1); |
| 1588 | goto mmu_tsb_base; |
| 1589 | |
| 1590 | case SS_ASI_DMMU_CTXT_NONZERO_CONFIG: /* 0x3B RW 0 Y DMMU Context Zero Config Register */ |
| 1591 | tsbinfop = &(nsp->dmmu_ctxt_nonzero_tsb_ps0); |
| 1592 | tsbinfop1 = &(nsp->dmmu_ctxt_nonzero_tsb_ps1); |
| 1593 | goto mmu_tsb_config; |
| 1594 | |
| 1595 | case SS_ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0: /* 0x3D RW 0 Y IMMU Context Nonzero TSB Base PS0 */ |
| 1596 | tsbinfop = &(nsp->immu_ctxt_nonzero_tsb_ps0); |
| 1597 | goto mmu_tsb_base; |
| 1598 | |
| 1599 | case SS_ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1: /* 0x3E RW 0 Y IMMU Context Nonzero TSB Base PS1 */ |
| 1600 | tsbinfop = &(nsp->immu_ctxt_nonzero_tsb_ps1); |
| 1601 | goto mmu_tsb_base; |
| 1602 | |
| 1603 | case SS_ASI_IMMU_CTXT_NONZERO_CONFIG: /* 0x3F RW 0 Y DMMU Context Zero Config Register */ |
| 1604 | tsbinfop = &(nsp->immu_ctxt_nonzero_tsb_ps0); |
| 1605 | tsbinfop1 = &(nsp->immu_ctxt_nonzero_tsb_ps1); |
| 1606 | goto mmu_tsb_config; |
| 1607 | |
| 1608 | #if INTERNAL_BUILD /* { */ |
| 1609 | case SS_ASI_STREAM_MA: /* 0x40 RW 0 N Asynchronous Streaming Control Register */ |
| 1610 | /* 0x40 RW 8 N SRC Register: Asynchronous Strm state */ |
| 1611 | /* 0x40 RW 10 N DEST Register: Asynchronous Strm state */ |
| 1612 | /* 0x40 RW 18 N DATA Register: Asynchronous Strm state */ |
| 1613 | /* 0x40 RW 20 N Chaining initialisation vector for DES /3DES */ |
| 1614 | /* 0x40 RW 28 N DES Key 1 */ |
| 1615 | /* 0x40 RW 30 N DES Key 2 */ |
| 1616 | /* 0x40 RW 38 N DES Key 3 */ |
| 1617 | /* 0x40 RW 40 N HASH STATE REG 1 */ |
| 1618 | /* 0x40 RW 48 N HASH STATE REG 2 */ |
| 1619 | /* 0x40 RW 50 N HASH STATE REG 3 */ |
| 1620 | /* 0x40 RW 68 N Wait for async stream operation to complete */ |
| 1621 | /* 0x40 RW 80 N Modular Arithmetic Control Register */ |
| 1622 | /* 0x40 RW 88 N Modular Arithmetic Physical Address Register(MPA) */ |
| 1623 | /* 0x40 RW 90 N Modular Arith. Memory Address Register(MA_ADDR) */ |
| 1624 | /* 0x40 RW 98 N Modular Arithmetic NP Register */ |
| 1625 | /* 0x40 RW A0 N Wait for async MA operation to complete */ |
| 1626 | { |
| 1627 | uint_t rv; |
| 1628 | |
| 1629 | rv = modarith_cpu_access(sp, addr, op, &val); |
| 1630 | |
| 1631 | switch (rv) { |
| 1632 | case MOD_ARITH_DONE: |
| 1633 | break; |
| 1634 | case MOD_ARITH_LD_COMPLETE: |
| 1635 | goto load_complete; |
| 1636 | case MOD_ARITH_DATA_ACCESS_EX_TRAP: |
| 1637 | EXEC_WARNING(("DAX in ASI_STREAM_MA\n")); |
| 1638 | goto data_access_exception; |
| 1639 | case MOD_ARITH_ILLEGAL_INST_TRAP: |
| 1640 | /* No version of Niagara does this... */ |
| 1641 | v9p->post_precise_trap(sp, |
| 1642 | Sparcv9_trap_illegal_instruction); |
| 1643 | return; |
| 1644 | case MOD_ARITH_FATAL: |
| 1645 | IMPL_WARNING(("fatal error during mod_arith processing")); |
| 1646 | fatal("fatal error during mod_arith processing"); |
| 1647 | return; /* never actually executed */ |
| 1648 | default: |
| 1649 | IMPL_WARNING(("unknown rv (0x%x) during mod_arith " |
| 1650 | "processing", rv)); |
| 1651 | fatal("fatal error during mod_arith processing"); |
| 1652 | return; /* never actually executed */ |
| 1653 | } |
| 1654 | } |
| 1655 | |
| 1656 | break; |
| 1657 | #endif /* } INTERNAL_BUILD */ |
| 1658 | |
| 1659 | case SS_ASI_LSU_DIAG_REG: /* 0x42 RW 0 N Sparc BIST control register */ /* SPARC_BIST_CONTROL */ |
| 1660 | /* 0x42 RW 8 N Sparc Instruction Mask Register */ /* INST_MASK_REG */ |
| 1661 | /* 0x42 RW 10 N Load/Store Unit Diagnostic Register */ /* LSU_DIAG_REG */ |
| 1662 | |
| 1663 | if (is_load) { |
| 1664 | switch(addr) { |
| 1665 | case 0x0: |
| 1666 | val = nsp->icachep->bist_ctl; |
| 1667 | goto load_complete; |
| 1668 | case 0x8: |
| 1669 | val = nsp->icachep->inst_mask; |
| 1670 | goto load_complete; |
| 1671 | case 0x10: |
| 1672 | val = (nsp->dcachep->assocdis ? 2 : 0) | |
| 1673 | (nsp->icachep->assocdis ? 1 : 0); |
| 1674 | goto load_complete; |
| 1675 | default: |
| 1676 | break; |
| 1677 | } |
| 1678 | } else { |
| 1679 | switch(addr) { |
| 1680 | case 0x0: |
| 1681 | nsp->icachep->bist_ctl = val & 0x7f; |
| 1682 | if (val & 1) nsp->icachep->bist_ctl |= 0x400; |
| 1683 | goto complete; |
| 1684 | case 0x8: |
| 1685 | nsp->icachep->inst_mask = val; |
| 1686 | goto complete; |
| 1687 | case 0x10: |
| 1688 | if (val & 2) nsp->dcachep->assocdis = true; |
| 1689 | if (val & 1) nsp->icachep->assocdis = true; |
| 1690 | goto complete; |
| 1691 | default: |
| 1692 | break; |
| 1693 | } |
| 1694 | } |
| 1695 | goto data_access_exception; |
| 1696 | |
| 1697 | case SS_ASI_ERROR_INJECT_REG: /* 0x43 RW 0 N Sparc Error Injection Register */ |
| 1698 | ITODO(SS_ASI_ERROR_INJECT_REG); |
| 1699 | break; |
| 1700 | |
| 1701 | case SS_ASI_STM_CTL_REG: /* 0x44 RW 0 N Self-timed Margin Control Register */ |
| 1702 | ITODO(SS_ASI_STM_CTL_REG); |
| 1703 | break; |
| 1704 | |
| 1705 | case SS_ASI_LSU_CONTROL_REG: /* 0x45 RW 0 Y Load/Store Unit Control Register */ |
| 1706 | switch(addr) { |
| 1707 | case 0x0: |
| 1708 | if (is_load) { |
| 1709 | val = (nsp->lsu_control_raw & ~(LSU_CTRL_DMMU_EN | LSU_CTRL_IMMU_EN)) | |
| 1710 | (nsp->dmmu.enabled ? LSU_CTRL_DMMU_EN : 0LL) | |
| 1711 | (nsp->immu.enabled ? LSU_CTRL_IMMU_EN : 0LL); |
| 1712 | goto load_complete; |
| 1713 | } else { |
| 1714 | /* |
| 1715 | * can only issue this in hpriv mode, so even though we turn the mmu |
| 1716 | * on and off, we dont need to flush the x and d translation caches |
| 1717 | * because in hpriv mode we're only fetching physical addressses. |
| 1718 | */ |
| 1719 | ASSERT( V9_RED == v9p->state || V9_HyperPriv == v9p->state ); |
| 1720 | |
| 1721 | val &= LSU_CTRL_REG_MASK; |
| 1722 | if ((val & (LSU_CTRL_WATCH_VR|LSU_CTRL_WATCH_VW)) != 0) { |
| 1723 | IMPL_WARNING(("ASI_LSU_CONTROL_REG watchpoint enable unimplemented @ pc=%lx\n", sp->pc)); |
| 1724 | } |
| 1725 | nsp->lsu_control_raw = val; |
| 1726 | nsp->dmmu.enabled = (val & LSU_CTRL_DMMU_EN) != 0; |
| 1727 | nsp->immu.enabled = (val & LSU_CTRL_IMMU_EN) != 0; |
| 1728 | sp->xicache_trans_flush_pending = true; |
| 1729 | sp->xdcache_trans_flush_pending = true; |
| 1730 | } |
| 1731 | break; |
| 1732 | default: |
| 1733 | goto data_access_exception; |
| 1734 | } |
| 1735 | break; |
| 1736 | |
| 1737 | case SS_ASI_DCACHE_DATA: /* 0x46 RW - N Dcache data array diagnostics access */ |
| 1738 | |
| 1739 | if (is_load) { |
| 1740 | uint64_t idx, lineword, tag; |
| 1741 | |
| 1742 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 1743 | lineword = addr&SS_DCACHE_DATA_BITS; |
| 1744 | tag = (addr&SS_DCACHE_DATA_TAG_BITS)>>10; |
| 1745 | |
| 1746 | RW_rdlock(&nsp->dcachep->rwlock); |
| 1747 | /* |
| 1748 | * must match tag to load data |
| 1749 | * iterate over 4 ways at bits [12:11] |
| 1750 | */ |
| 1751 | for (idx=lineword+0x1800; idx>=lineword; idx=idx-0x800) { |
| 1752 | if (nsp->dcachep->tagp[idx] == tag) { |
| 1753 | val = nsp->dcachep->datap[idx]; |
| 1754 | break; |
| 1755 | } |
| 1756 | EXEC_WARNING( ("ASI_DCACHE_DATA load tag 0x%xll has no match", |
| 1757 | addr&SS_DCACHE_DATA_TAG_BITS) ); |
| 1758 | } |
| 1759 | RW_unlock(&nsp->dcachep->rwlock); |
| 1760 | goto load_complete; |
| 1761 | } else { |
| 1762 | uint64_t idx; |
| 1763 | |
| 1764 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 1765 | idx = (addr&SS_DCACHE_DATA_BITS)>>3; |
| 1766 | |
| 1767 | RW_wrlock(&nsp->dcachep->rwlock); |
| 1768 | nsp->dcachep->datap[idx] = val; |
| 1769 | RW_unlock(&nsp->dcachep->rwlock); |
| 1770 | goto complete; |
| 1771 | } |
| 1772 | |
| 1773 | case SS_ASI_DCACHE_TAG: /* 0x47 RW - N Dcache tag and valid bit diagnostics access */ |
| 1774 | |
| 1775 | if (is_load) { |
| 1776 | uint64_t idx; |
| 1777 | |
| 1778 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 1779 | idx = (addr&SS_DCACHE_TAG_WAYLINE_BITS)>>4; |
| 1780 | |
| 1781 | RW_rdlock(&nsp->dcachep->rwlock); |
| 1782 | val = nsp->dcachep->tagp[idx]; |
| 1783 | RW_unlock(&nsp->dcachep->rwlock); |
| 1784 | goto load_complete; |
| 1785 | } else { |
| 1786 | uint64_t idx; |
| 1787 | |
| 1788 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 1789 | idx = (addr&SS_DCACHE_TAG_WAYLINE_BITS)>>4; |
| 1790 | |
| 1791 | RW_wrlock(&nsp->dcachep->rwlock); |
| 1792 | nsp->dcachep->tagp[idx] = val; |
| 1793 | RW_unlock(&nsp->dcachep->rwlock); |
| 1794 | goto complete; |
| 1795 | } |
| 1796 | |
| 1797 | case SS_ASI_INTR_DISPATCH_STATUS: /* 0x48 - - - any type of access causes data_access_exception */ |
| 1798 | case SS_ASI_INTR_RECEIVE: /* 0x49 - - - any type of access causes data_access_exception */ |
| 1799 | case SS_ASI_UPA_CONFIG_REGISTER: /* 0x4A - - - any type of access causes data_access_exception */ |
| 1800 | goto data_access_exception; |
| 1801 | |
| 1802 | case SS_ASI_SPARC_ERROR_EN_REG: /* 0x4B RW 0 N Sparc error enable reg(synchronous ecc/parity errors) */ |
| 1803 | if (0LL != addr) goto data_access_exception; |
| 1804 | if (is_load) { |
| 1805 | val = nsp->error.enabled; |
| 1806 | goto load_complete; |
| 1807 | } else { |
| 1808 | nsp->error.enabled = (val & (NA_CEEN | NA_NCEEN)); |
| 1809 | } |
| 1810 | break; |
| 1811 | |
| 1812 | case SS_ASI_SPARC_ERROR_STATUS_REG: /* 0x4C RW 0 Y Sparc error status reg */ |
| 1813 | if (0LL != addr) goto data_access_exception; |
| 1814 | if (is_load) { |
| 1815 | val = nsp->error.status; |
| 1816 | goto load_complete; |
| 1817 | } else { |
| 1818 | nsp->error.status &= ~val; |
| 1819 | } |
| 1820 | break; |
| 1821 | |
| 1822 | case SS_ASI_SPARC_ERROR_ADDRESS_REG: /* 0x4D RW 0 Y Sparc error address reg */ |
| 1823 | if (0LL != addr || !is_load) goto data_access_exception; |
| 1824 | val = nsp->error.addr; |
| 1825 | goto load_complete; |
| 1826 | |
| 1827 | case SS_ASI_ECACHE_TAG_DATA: /* 0x4E - - - any type of access causes data_access_exception */ |
| 1828 | goto data_access_exception; |
| 1829 | |
| 1830 | case SS_ASI_HYP_SCRATCHPAD: |
| 1831 | /* |
| 1832 | * Niagara1/N2 : |
| 1833 | * 0x4F RW 0-38 Y Hypervisor Scratchpad |
| 1834 | * Rock : |
| 1835 | * 0x4F RW 0-18 Y Hypervisor Scratchpad |
| 1836 | */ |
| 1837 | |
| 1838 | if (INVALID_HYP_SCRATCHPAD(addr)) { |
| 1839 | goto data_access_exception; |
| 1840 | } else { |
| 1841 | uint64_t *valp = |
| 1842 | &(nsp->strand_reg[SSR_HSCRATCHPAD_INDEX + (addr>>3)]); |
| 1843 | if (is_load) { |
| 1844 | val = *valp; |
| 1845 | goto load_complete; |
| 1846 | } |
| 1847 | DBGSCRATCH( if (*valp != val) |
| 1848 | lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: " |
| 1849 | "0x%llx -> 0x%llx pc=0x%llx\n", |
| 1850 | asi, addr, *valp, val, sp->pc); ); |
| 1851 | *valp = val; |
| 1852 | } |
| 1853 | break; |
| 1854 | |
| 1855 | case SS_ASI_IMMU: /* 0x50 R 0 Y IMMU Tag Target register */ |
| 1856 | /* 0x50 RW 18 Y IMMU Synchronous Fault Status Register */ |
| 1857 | /* 0x50 RW 30 Y IMMU TLB Tag Access Register */ |
| 1858 | mmup = &(nsp->immu); |
| 1859 | |
| 1860 | if (is_load) { |
| 1861 | switch(addr) { |
| 1862 | case 0x0: |
| 1863 | tag_target_read:; |
| 1864 | VA48_ASSERT(mmup->tag_access_reg); |
| 1865 | val = (mmup->tag_access_reg >> 22) | ((mmup->tag_access_reg&MASK64(12,0))<<48); |
| 1866 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1867 | goto load_complete; |
| 1868 | case 0x18: |
| 1869 | read_sfsr:; |
| 1870 | val = mmup->sfsr; |
| 1871 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1872 | goto load_complete; |
| 1873 | case 0x30: |
| 1874 | tag_access_read:; |
| 1875 | val = mmup->tag_access_reg; |
| 1876 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1877 | VA48_ASSERT(val); |
| 1878 | goto load_complete; |
| 1879 | default: |
| 1880 | break; |
| 1881 | } |
| 1882 | } else { |
| 1883 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1884 | switch(addr) { |
| 1885 | case 0x18: |
| 1886 | write_sfsr:; |
| 1887 | mmup->sfsr = val & MMU_SFSR_MASK; |
| 1888 | sp->xicache_trans_flush_pending = true; |
| 1889 | goto complete; |
| 1890 | case 0x30: |
| 1891 | tag_access_write:; |
| 1892 | VA48_WARNING(sp, val); |
| 1893 | mmup->tag_access_reg = VA48(val); |
| 1894 | DBGMMU( lprintf(sp->gid, "ASI 0x%x : %cMMU tag access = 0x%llx\n", asi, mmup->is_immu ? 'I' : 'D', mmup->tag_access_reg); ); |
| 1895 | goto complete; |
| 1896 | default: |
| 1897 | break; |
| 1898 | } |
| 1899 | } |
| 1900 | goto data_access_exception; |
| 1901 | |
| 1902 | case SS_ASI_IMMU_TSB_PS0_PTR_REG: /* 0x51 R 0 Y IMMU TSB PS0 pointer register */ |
| 1903 | mmup = &(nsp->immu); |
| 1904 | read_tsb_ps0_ptr:; |
| 1905 | if (!is_load) goto data_access_exception; |
| 1906 | if (mmup == &nsp->dmmu) { |
| 1907 | if ((mmup->tag_access_reg & MASK64(12,0)) == 0) |
| 1908 | tsbinfop = &nsp->dmmu_ctxt_zero_tsb_ps0; |
| 1909 | else |
| 1910 | tsbinfop = &nsp->dmmu_ctxt_nonzero_tsb_ps0; |
| 1911 | } else { |
| 1912 | if ((mmup->tag_access_reg & MASK64(12,0)) == 0) |
| 1913 | tsbinfop = &nsp->immu_ctxt_zero_tsb_ps0; |
| 1914 | else |
| 1915 | tsbinfop = &nsp->immu_ctxt_nonzero_tsb_ps0; |
| 1916 | } |
| 1917 | val = 0; |
| 1918 | goto common_make_tsb_ptr; |
| 1919 | |
| 1920 | case SS_ASI_IMMU_TSB_PS1_PTR_REG: /* 0x52 R 0 Y IMMU TSB PS1 pointer register */ |
| 1921 | mmup = &(nsp->immu); |
| 1922 | read_tsb_ps1_ptr:; |
| 1923 | if (!is_load) goto data_access_exception; |
| 1924 | if (mmup == &nsp->dmmu) { |
| 1925 | if ((mmup->tag_access_reg & MASK64(12,0)) == 0) |
| 1926 | tsbinfop = &nsp->dmmu_ctxt_zero_tsb_ps1; |
| 1927 | else |
| 1928 | tsbinfop = &nsp->dmmu_ctxt_nonzero_tsb_ps1; |
| 1929 | } else { |
| 1930 | if ((mmup->tag_access_reg & MASK64(12,0)) == 0) |
| 1931 | tsbinfop = &nsp->immu_ctxt_zero_tsb_ps1; |
| 1932 | else |
| 1933 | tsbinfop = &nsp->immu_ctxt_nonzero_tsb_ps1; |
| 1934 | } |
| 1935 | if (tsbinfop->is_split) |
| 1936 | val = 1ull << (13+tsbinfop->tsb_size); |
| 1937 | else |
| 1938 | val = 0; |
| 1939 | common_make_tsb_ptr:; |
| 1940 | /* |
| 1941 | * base_addr was masked when the TSB base was written, |
| 1942 | * so no need to mask again here. |
| 1943 | */ |
| 1944 | val |= tsbinfop->base_addr; |
| 1945 | val |= (mmup->tag_access_reg >> (13 - 4 + tsbinfop->page_size * 3)) & MASK64((9+tsbinfop->tsb_size+4-1), 4); |
| 1946 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1947 | goto load_complete; |
| 1948 | |
| 1949 | case SS_ASI_ITLB_DATA_IN_REG: /* 0x54 W 0 N IMMU data in register */ |
| 1950 | tlbp = nsp->itlbp; |
| 1951 | mmup = &(nsp->immu); |
| 1952 | |
| 1953 | tlb_data_in_valid_check:; |
| 1954 | /* |
| 1955 | * Check for attempts to load this ASI -or- invalid PA |
| 1956 | * (only bits 10-9 should be set) |
| 1957 | */ |
| 1958 | if (is_load || (addr & ~MASK64(10,9))!=0) goto data_access_exception; |
| 1959 | |
| 1960 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1961 | idx = SS_TLB_LRU; |
| 1962 | tlb_data_in:; |
| 1963 | is_real = SS_TLB_IS_REAL(addr); |
| 1964 | |
| 1965 | if ((addr>>10)&1) val = niagara_shuffle_sun4v_format(val); |
| 1966 | |
| 1967 | if (!ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val, idx)) |
| 1968 | goto data_access_exception; |
| 1969 | |
| 1970 | goto complete; |
| 1971 | |
| 1972 | case SS_ASI_ITLB_DATA_ACCESS_REG: /* 0x55 RW 0-1F8 N IMMU TLB Data Access Register */ |
| 1973 | tlbp = nsp->itlbp; |
| 1974 | mmup = &(nsp->immu); |
| 1975 | tlb_data_access:; |
| 1976 | |
| 1977 | /* Check for valid tlb index */ |
| 1978 | idx = (addr >> 3) & 0x3f; |
| 1979 | if (idx >= tlbp->nentries) goto data_access_exception; |
| 1980 | |
| 1981 | if (!is_load) { |
| 1982 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 1983 | /* |
| 1984 | * Store |
| 1985 | * |
| 1986 | * Check for invalid PA (only bits 10-3 should be set) |
| 1987 | */ |
| 1988 | if ((addr & ~MASK64(10,3))!=0) |
| 1989 | goto data_access_exception; |
| 1990 | |
| 1991 | goto tlb_data_in; |
| 1992 | |
| 1993 | } else { |
| 1994 | /* |
| 1995 | * Load |
| 1996 | */ |
| 1997 | tlb_entry_t * tep; |
| 1998 | |
| 1999 | #if ERROR_INJECTION |
| 2000 | if (sp->error_check == true && |
| 2001 | (ep = find_errconf(sp, ASI_LD, IMDU|DMDU))) { |
| 2002 | if (ep->type == IMDU && mmup->is_immu) { |
| 2003 | sp->errorp->tlb_idx[IMDU_IDX] = idx; |
| 2004 | ss_error_condition(sp, ep); |
| 2005 | return; |
| 2006 | } else |
| 2007 | if (ep->type == DMDU && !mmup->is_immu) { |
| 2008 | sp->errorp->tlb_idx[DMDU_IDX] = idx; |
| 2009 | ss_error_condition(sp, ep); |
| 2010 | return; |
| 2011 | } |
| 2012 | } |
| 2013 | #endif |
| 2014 | RW_rdlock(&tlbp->rwlock); |
| 2015 | tep = &tlbp->tlb_entryp[idx]; |
| 2016 | val = tep->data; |
| 2017 | RW_unlock(&tlbp->rwlock); |
| 2018 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2019 | goto load_complete; |
| 2020 | } |
| 2021 | |
| 2022 | case SS_ASI_ITLB_TAG_READ_REG: /* 0x56 R 0-1F8 N IMMU TLB Tag Read Register */ |
| 2023 | tlbp = nsp->itlbp; |
| 2024 | mmup = &(nsp->immu); |
| 2025 | tlb_tag_read:; |
| 2026 | if (is_load) { |
| 2027 | tlb_entry_t * tep; |
| 2028 | |
| 2029 | idx = addr >> 3; |
| 2030 | if (idx >= tlbp->nentries) goto data_access_exception; |
| 2031 | #if ERROR_INJECTION |
| 2032 | if (sp->error_check == true && |
| 2033 | (ep = find_errconf(sp, ASI_LD, IMTU|DMTU))) { |
| 2034 | if (ep->type == IMTU && mmup->is_immu) { |
| 2035 | sp->errorp->tlb_idx[IMTU_IDX] = idx; |
| 2036 | ss_error_condition(sp, ep); |
| 2037 | return; |
| 2038 | } else |
| 2039 | if (ep->type == DMTU && !mmup->is_immu) { |
| 2040 | sp->errorp->tlb_idx[DMTU_IDX] = idx; |
| 2041 | ss_error_condition(sp, ep); |
| 2042 | return; |
| 2043 | } |
| 2044 | } |
| 2045 | #endif |
| 2046 | RW_rdlock(&tlbp->rwlock); |
| 2047 | tep = &tlbp->tlb_entryp[idx]; |
| 2048 | val = ((uint64_t)tep->partid << 61) | |
| 2049 | ((uint64_t)(tep->is_real?1:0) << 60); |
| 2050 | val |= (tep->tag_pfn & MASK64(55, 13)) | (uint64_t)tep->tag_context; |
| 2051 | RW_unlock(&tlbp->rwlock); |
| 2052 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2053 | goto load_complete; |
| 2054 | } |
| 2055 | goto data_access_exception; |
| 2056 | |
| 2057 | case SS_ASI_IMMU_DEMAP: /* 0x57 W 0 Y IMMU TLB Demap */ |
| 2058 | mmup = &(nsp->immu); |
| 2059 | tlbp = nsp->itlbp; |
| 2060 | tlb_demap:; |
| 2061 | if (is_load) goto data_access_exception; |
| 2062 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2063 | { |
| 2064 | ss_demap_t op; |
| 2065 | uint_t context; |
| 2066 | |
| 2067 | op = (ss_demap_t) ((addr>>6)&0x3); |
| 2068 | |
| 2069 | switch ((addr >> 4)&0x3) { |
| 2070 | case 0x0: context = nsp->pri_context; break; /* primary context */ |
| 2071 | case 0x1: context = nsp->sec_context; break; /* secondary context */ |
| 2072 | case 0x2: context = SS_NUCLEUS_CONTEXT; break; /* nucleus context */ |
| 2073 | case 0x3: |
| 2074 | /* |
| 2075 | * use of reserved value is valid but causes |
| 2076 | * demap to be ignored for the following two ops |
| 2077 | */ |
| 2078 | if (op==NA_demap_page || op==NA_demap_context) |
| 2079 | goto complete; |
| 2080 | } |
| 2081 | |
| 2082 | if (op == NA_demap_page) { |
| 2083 | if ((addr & BIT(47)) == 0) { |
| 2084 | if ((addr & MASK64(63, 48)) != 0) { |
| 2085 | EXEC_WARNING(("(@pc=0x%llx) demap " |
| 2086 | "address range " |
| 2087 | "asi=0x%x va=0x%llx", |
| 2088 | sp->pc, asi, addr)); |
| 2089 | } |
| 2090 | addr &= MASK64(47, 0); |
| 2091 | } else { |
| 2092 | if ((addr & MASK64(63, 48)) != MASK64(63, 48)) { |
| 2093 | EXEC_WARNING(("(@pc=0x%llx) demap " |
| 2094 | "address range " |
| 2095 | "asi=0x%x va=0x%llx", |
| 2096 | sp->pc, asi, addr)); |
| 2097 | } |
| 2098 | addr |= MASK64(63, 48); |
| 2099 | } |
| 2100 | } |
| 2101 | |
| 2102 | is_real = SS_TLB_IS_REAL(addr); |
| 2103 | if (!ss_demap(sp, op, mmup, tlbp, nsp->partid, is_real, context, addr)) goto data_access_exception; |
| 2104 | } |
| 2105 | goto complete; |
| 2106 | |
| 2107 | |
| 2108 | case SS_ASI_DMMU: /* 0x58 R 0 Y D-MMU Tag Target Register */ |
| 2109 | /* 0x58 RW 18 Y DMMU Synchronous Fault Status Register */ |
| 2110 | /* 0x58 R 20 Y DMMU Synchronous Fault Address Register */ |
| 2111 | /* 0x58 RW 30 Y DMMU TLB Tag Access Register */ |
| 2112 | /* 0x58 RW 38 Y DMMU VA Data Watchpoint Register */ |
| 2113 | /* 0x58 RW 40 Y Niagara 2: Tablewalk Config Reg */ |
| 2114 | /* 0x58 RW 80 Y I/DMMU Partition ID */ |
| 2115 | mmup = &(nsp->dmmu); |
| 2116 | if (is_load) { |
| 2117 | switch(addr) { |
| 2118 | case 0x0: |
| 2119 | goto tag_target_read; |
| 2120 | case 0x18: |
| 2121 | goto read_sfsr; |
| 2122 | case 0x20: |
| 2123 | val = mmup->sfar; |
| 2124 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2125 | VA48_ASSERT(val); |
| 2126 | goto load_complete; |
| 2127 | case 0x30: |
| 2128 | goto tag_access_read; |
| 2129 | case 0x38: |
| 2130 | val = mmup->va_watchpoint; |
| 2131 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2132 | goto load_complete; |
| 2133 | case 0x80: |
| 2134 | val = (uint64_t)(nsp->partid); |
| 2135 | DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2136 | goto load_complete; |
| 2137 | default: |
| 2138 | break; |
| 2139 | } |
| 2140 | } else { |
| 2141 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2142 | switch(addr) { |
| 2143 | case 0x18: |
| 2144 | goto write_sfsr; |
| 2145 | case 0x30: |
| 2146 | goto tag_access_write; |
| 2147 | case 0x38: |
| 2148 | mmup->va_watchpoint = VA48(val); |
| 2149 | goto complete; |
| 2150 | case 0x80: |
| 2151 | /* can only do in hypervisor mode - switching mode causes the xi/xd |
| 2152 | * cache flush anyway |
| 2153 | */ |
| 2154 | nsp->partid = val & 0x7; /* three bits of part id only */ |
| 2155 | sp->xicache_trans_flush_pending = true; |
| 2156 | sp->xdcache_trans_flush_pending = true; |
| 2157 | goto complete; |
| 2158 | default: |
| 2159 | break; |
| 2160 | } |
| 2161 | } |
| 2162 | goto data_access_exception; |
| 2163 | |
| 2164 | case SS_ASI_DMMU_TSB_PS0_PTR_REG: /* 0x59 R 0 Y DMMU TSB PS0 pointer register */ |
| 2165 | mmup = &(nsp->dmmu); |
| 2166 | goto read_tsb_ps0_ptr; |
| 2167 | |
| 2168 | case SS_ASI_DMMU_TSB_PS1_PTR_REG: /* 0x5A R 0 Y DMMU TSB PS1 pointer register */ |
| 2169 | mmup = &(nsp->dmmu); |
| 2170 | goto read_tsb_ps1_ptr; |
| 2171 | |
| 2172 | case SS_ASI_DMMU_TSB_DIRECT_PTR_REG: /* 0x5B R 0 Y DMMU TSB Direct pointer register */ |
| 2173 | if (!is_load) goto data_access_exception; |
| 2174 | mmup = &(nsp->dmmu); |
| 2175 | if (mmup->tsb_direct_ps1) |
| 2176 | goto read_tsb_ps1_ptr; |
| 2177 | goto read_tsb_ps0_ptr; |
| 2178 | |
| 2179 | case SS_ASI_DTLB_DATA_IN_REG: /* 0x5C W 0 N DMMU data in register */ |
| 2180 | tlbp = nsp->dtlbp; |
| 2181 | mmup = &(nsp->dmmu); |
| 2182 | goto tlb_data_in_valid_check; |
| 2183 | |
| 2184 | case SS_ASI_DTLB_DATA_ACCESS_REG: /* 0x5D RW 0-1F8 N DMMU TLB Data Access Register */ |
| 2185 | tlbp = nsp->dtlbp; |
| 2186 | mmup = &(nsp->dmmu); |
| 2187 | goto tlb_data_access; |
| 2188 | |
| 2189 | case SS_ASI_DTLB_TAG_READ_REG: /* 0x5E R 0-1F8 N DMMU TLB Tag Read Register */ |
| 2190 | tlbp = nsp->dtlbp; |
| 2191 | mmup = &(nsp->dmmu); |
| 2192 | goto tlb_tag_read; |
| 2193 | |
| 2194 | case SS_ASI_DMMU_DEMAP: /* 0x5F W 0 Y DMMU TLB Demap */ |
| 2195 | mmup = &(nsp->dmmu); |
| 2196 | tlbp = nsp->dtlbp; |
| 2197 | goto tlb_demap; |
| 2198 | |
| 2199 | case SS_ASI_TLB_INVALIDATE_ALL: /* 0x60 W 0 N IMMU TLB Invalidate Register */ |
| 2200 | /* 0x60 W 8 N DMMU TLB Invalidate Register */ |
| 2201 | if (is_load || !(addr==0x0 || addr==0x8)) goto data_access_exception; |
| 2202 | if (addr == 0) { |
| 2203 | mmup = &nsp->immu; |
| 2204 | tlbp = nsp->itlbp; |
| 2205 | } else { |
| 2206 | mmup = &nsp->dmmu; |
| 2207 | tlbp = nsp->dtlbp; |
| 2208 | } |
| 2209 | DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); ); |
| 2210 | if (!ss_demap(sp, NA_demap_init, mmup, tlbp, SS_TLB_INVALID_PARTID, false, SS_TLB_INVALID_CONTEXT, 0)) |
| 2211 | goto data_access_exception; |
| 2212 | goto complete; |
| 2213 | |
| 2214 | case SS_ASI_ICACHE_INSTR: /* 0x66 RW - N Icache data array diagnostics access */ |
| 2215 | |
| 2216 | if (is_load) { |
| 2217 | uint64_t idx; |
| 2218 | |
| 2219 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 2220 | idx = ((addr&SS_ICACHE_DATA_LINEWORD_BITS)|((addr&SS_ICACHE_DATA_WAY_BITS)>>3))>>3; |
| 2221 | |
| 2222 | RW_rdlock(&nsp->icachep->rwlock); |
| 2223 | val = nsp->icachep->datap[idx]; |
| 2224 | RW_unlock(&nsp->icachep->rwlock); |
| 2225 | goto load_complete; |
| 2226 | } else { |
| 2227 | uint64_t idx; |
| 2228 | |
| 2229 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 2230 | idx = ((addr&SS_ICACHE_DATA_LINEWORD_BITS)|((addr&SS_ICACHE_DATA_WAY_BITS)>>3))>>3; |
| 2231 | |
| 2232 | RW_wrlock(&nsp->icachep->rwlock); |
| 2233 | nsp->icachep->datap[idx] = val; |
| 2234 | RW_unlock(&nsp->icachep->rwlock); |
| 2235 | goto complete; |
| 2236 | } |
| 2237 | |
| 2238 | case SS_ASI_ICACHE_TAG: /* 0x67 RW - N Icache tag and valid bit diagnostics access */ |
| 2239 | |
| 2240 | if (is_load) { |
| 2241 | uint64_t idx; |
| 2242 | |
| 2243 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 2244 | idx = (((addr&SS_ICACHE_TAG_LINE_BITS)>>3)|((addr&SS_ICACHE_TAG_WAY_BITS)>>6))>>3; |
| 2245 | |
| 2246 | RW_rdlock(&nsp->icachep->rwlock); |
| 2247 | val = nsp->icachep->tagp[idx]; |
| 2248 | RW_unlock(&nsp->icachep->rwlock); |
| 2249 | goto load_complete; |
| 2250 | } else { |
| 2251 | uint64_t idx; |
| 2252 | |
| 2253 | /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */ |
| 2254 | idx = (((addr&SS_ICACHE_TAG_LINE_BITS)>>3)|((addr&SS_ICACHE_TAG_WAY_BITS)>>6))>>3; |
| 2255 | |
| 2256 | RW_wrlock(&nsp->icachep->rwlock); |
| 2257 | nsp->icachep->tagp[idx] = val; |
| 2258 | RW_unlock(&nsp->icachep->rwlock); |
| 2259 | goto complete; |
| 2260 | } |
| 2261 | |
| 2262 | case SS_ASI_SWVR_INTR_RECEIVE: /* 0x72 RW 0 Y Interrupt Receive Register */ |
| 2263 | if (0LL != addr) goto data_access_exception; |
| 2264 | if (is_load) { |
| 2265 | pthread_mutex_lock(&nsp->irq_lock); |
| 2266 | val = nsp->irq_vector; |
| 2267 | pthread_mutex_unlock(&nsp->irq_lock); |
| 2268 | DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_INTR_RECEIVE load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 2269 | goto load_complete; |
| 2270 | } else { |
| 2271 | uint64_t oldval, newval; |
| 2272 | pthread_mutex_lock(&nsp->irq_lock); |
| 2273 | DBGMONDO( oldval = nsp->irq_vector; ); |
| 2274 | nsp->irq_vector &= val; |
| 2275 | DBGMONDO( newval = nsp->irq_vector; ); |
| 2276 | pthread_mutex_unlock(&nsp->irq_lock); |
| 2277 | DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_INTR_RECEIVE store 0x%x/0x%llx : 0x%llx irq_vector: 0x%llx -> 0x%llx (pc=0x%llx)\n", asi, addr, val, oldval, newval, sp->pc); ); |
| 2278 | ss_check_interrupts(sp); |
| 2279 | } |
| 2280 | break; |
| 2281 | |
| 2282 | case SS_ASI_SWVR_UDB_INTR_W: /* 0x73 W 0 Y Interrupt Vector Dispatch Register */ |
| 2283 | if (0LL != addr || is_load) goto data_access_exception; |
| 2284 | DBGMONDO( lprintf(sp->gid, "ASI_SWVR_UDB_INTR_W store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 2285 | niagara_send_xirq(sp, val); |
| 2286 | break; |
| 2287 | |
| 2288 | case SS_ASI_SWVR_UDB_INTR_R: /* 0x74 R 0 Y Incoming Vector Register */ |
| 2289 | if (0LL != addr || !is_load) goto data_access_exception; |
| 2290 | pthread_mutex_lock(&nsp->irq_lock); |
| 2291 | { |
| 2292 | uint64_t vec; |
| 2293 | uint8_t bit = 0; |
| 2294 | |
| 2295 | vec = nsp->irq_vector; |
| 2296 | if (vec == 0) { |
| 2297 | val = 0; |
| 2298 | goto udb_intr_r_done; |
| 2299 | } |
| 2300 | if (vec & 0xffffffff00000000ull) { |
| 2301 | bit += 32; vec >>= 32; |
| 2302 | } |
| 2303 | if (vec & 0xffff0000) { |
| 2304 | bit += 16; vec >>= 16; |
| 2305 | } |
| 2306 | if (vec & 0xff00) { |
| 2307 | bit += 8; vec >>= 8; |
| 2308 | } |
| 2309 | if (vec & 0xf0) { |
| 2310 | bit += 4; vec >>= 4; |
| 2311 | } |
| 2312 | if (vec & 0xc) { |
| 2313 | bit += 2; vec >>= 2; |
| 2314 | } |
| 2315 | if (vec & 0x2) { |
| 2316 | bit += 1; |
| 2317 | } |
| 2318 | nsp->irq_vector &= ~((uint64_t)1<<bit); |
| 2319 | |
| 2320 | val = bit; |
| 2321 | } |
| 2322 | udb_intr_r_done:; |
| 2323 | pthread_mutex_unlock(&nsp->irq_lock); |
| 2324 | DBGMONDO( lprintf(sp->gid, "SS_ASI_SWVR_UDB_INTR_R load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n", asi, addr, val, sp->pc); ); |
| 2325 | goto load_complete; |
| 2326 | |
| 2327 | default: |
| 2328 | data_access_exception: |
| 2329 | addr = ((op & MA_Op_Mask) == MA_CAS) ? |
| 2330 | reg1 : (reg1 + reg2); |
| 2331 | niagara_set_sfsr(sp, &nsp->dmmu, addr, MMU_SFSR_FT_ASI, ss_ctx_primary, asi, 1, 0); |
| 2332 | tt = (sparcv9_trap_type_t)Sparcv9_trap_data_access_exception; |
| 2333 | ASSERT(0LL==sp->intreg[Reg_sparcv9_g0]); |
| 2334 | MEMORY_ACCESS_TRAP(); |
| 2335 | v9p->post_precise_trap(sp, tt); |
| 2336 | return; |
| 2337 | } |
| 2338 | |
| 2339 | complete:; |
| 2340 | NEXT_INSTN(sp); |
| 2341 | return; |
| 2342 | |
| 2343 | |
| 2344 | load_complete: |
| 2345 | if (op == MA_Ld ) { |
| 2346 | if (regnum != Reg_sparcv9_g0) sp->intreg[regnum] = val; |
| 2347 | } else { /* op == MA_LdFloat */ |
| 2348 | ASSERT(MA_LdFloat == op); |
| 2349 | switch(size) { |
| 2350 | case MA_Size32: |
| 2351 | sp->fpreg.s32[regnum] = val; |
| 2352 | break; |
| 2353 | case MA_Size64: |
| 2354 | sp->fpreg.s64[regnum >> 1] = val; |
| 2355 | break; |
| 2356 | default: |
| 2357 | goto unimplemented; |
| 2358 | } |
| 2359 | } |
| 2360 | goto complete; |
| 2361 | |
| 2362 | unimplemented: |
| 2363 | IMPL_WARNING(("ASI access (0x%02x) (@pc=0x%llx) to address 0x%llx currently unimplemented", asi, sp->pc, addr)); |
| 2364 | v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction); |
| 2365 | return; |
| 2366 | } |
| 2367 | |
| 2368 | |
| 2369 | /* |
| 2370 | * Slow generic memory access .. |
| 2371 | * .. becomes the path for all the accesses we cant handle via the load/store hash |
| 2372 | */ |
| 2373 | |
| 2374 | |
| 2375 | |
| 2376 | void |
| 2377 | ss_memory_asi_access(simcpu_t * sp, maccess_t memop, uint64_t * regp, |
| 2378 | mem_flags_t mflags, uint_t asi, uint_t context_type, |
| 2379 | uint_t align_mask, tvaddr_t va, tvaddr_t reg2) |
| 2380 | { |
| 2381 | sparcv9_cpu_t * v9p; |
| 2382 | ss_strand_t * nsp; |
| 2383 | ss_proc_t * npp; |
| 2384 | ss_l2_cache_t * l2p; |
| 2385 | error_conf_t * ep; |
| 2386 | error_t * errorp; |
| 2387 | tpaddr_t pa; |
| 2388 | tpaddr_t pa_tag; |
| 2389 | tvaddr_t tag, perm_cache; |
| 2390 | uint8_t * bufp; |
| 2391 | uint8_t * ptr; |
| 2392 | config_addr_t * cap; |
| 2393 | tpaddr_t extent; |
| 2394 | uint_t flags; |
| 2395 | uint_t size; |
| 2396 | uint_t op; |
| 2397 | dev_access_t da; |
| 2398 | |
| 2399 | v9p = (sparcv9_cpu_t *)(sp->specificp); |
| 2400 | nsp = v9p->impl_specificp; |
| 2401 | npp = (ss_proc_t *)(sp->config_procp->procp); |
| 2402 | |
| 2403 | mflags ^= (asi & SS_ASI_LE_MASK) ? MF_Little_Endian : 0; |
| 2404 | |
| 2405 | /* OK, derive access address etc. */ |
| 2406 | |
| 2407 | size = memop & MA_Size_Mask; |
| 2408 | op = memop & MA_Op_Mask; |
| 2409 | |
| 2410 | if (MA_CAS != op) { |
| 2411 | va += reg2; |
| 2412 | } |
| 2413 | |
| 2414 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: LE load/store pc=0x%llx instr=0x%x count=%d asi=0x%x\n", sp->pc, op, (1 << size), asi); ); |
| 2415 | |
| 2416 | /* |
| 2417 | * OK - Step 1 : to do or not do a TLB translation. |
| 2418 | * The assumption here is that privilege checks have already happened. |
| 2419 | */ |
| 2420 | |
| 2421 | #if ERROR_INJECTION |
| 2422 | errorp = sp->errorp; |
| 2423 | #endif |
| 2424 | |
| 2425 | /* quick check of alignment */ |
| 2426 | if ((va & (tvaddr_t)align_mask) != 0) { |
| 2427 | sparcv9_trap_type_t tt; |
| 2428 | if (v9p->pstate.addr_mask) |
| 2429 | va &= MASK64(31,0); /* SV9_ID125 FIXME */ |
| 2430 | |
| 2431 | DBGALIGN( lprintf(sp->gid,"Miss data access pc=0x%llx va=0x%llx align_mask=0x%llx\n", sp->pc, va, (tvaddr_t)align_mask); ); |
| 2432 | /* alignment error force a trap */ |
| 2433 | VA48_WARNING(sp, va); |
| 2434 | SET_DTLB_FAULT( nsp, VA48(va) ); |
| 2435 | MEMORY_ACCESS_TRAP(); |
| 2436 | niagara_set_sfsr(sp, &nsp->dmmu, va, 0/*fixme*/, context_type, asi, 0, 0); |
| 2437 | if ((MA_ldfp64 == memop || MA_stfp64 == memop) && |
| 2438 | ((va & 0x7) == 0x4)) |
| 2439 | tt = ((memop == MA_ldfp64) ? |
| 2440 | Sparcv9_trap_LDDF_mem_address_not_aligned : |
| 2441 | Sparcv9_trap_STDF_mem_address_not_aligned); |
| 2442 | else |
| 2443 | tt = Sparcv9_trap_mem_address_not_aligned; |
| 2444 | |
| 2445 | v9p->post_precise_trap(sp, tt); |
| 2446 | return; |
| 2447 | } |
| 2448 | |
| 2449 | /* Find the pa corresponding to the line we need */ |
| 2450 | tag = va & XDCACHE_TAG_MASK; |
| 2451 | |
| 2452 | /* |
| 2453 | * We have to get the PA from the EA ... this depends on the mode |
| 2454 | * and the type of access. |
| 2455 | */ |
| 2456 | |
| 2457 | pa_tag = tag; |
| 2458 | if (v9p->pstate.addr_mask) { |
| 2459 | pa_tag &= MASK64(31,0); |
| 2460 | va &= MASK64(31,0); |
| 2461 | /* NOTE: we dont mask tag ... we allow that to match the 64bit address */ |
| 2462 | } |
| 2463 | |
| 2464 | pa = va; |
| 2465 | flags = SS_TLB_FLAG_READ | SS_TLB_FLAG_WRITE; /* default access flags */ |
| 2466 | |
| 2467 | |
| 2468 | |
| 2469 | /* |
| 2470 | * OK perform the TLB access based on the context |
| 2471 | * and partition id selected |
| 2472 | */ |
| 2473 | |
| 2474 | /* default read and write permission for MMU bypass */ |
| 2475 | perm_cache = XDCACHE_READ_PERM | XDCACHE_WRITE_PERM; |
| 2476 | |
| 2477 | if (!(mflags & MF_MMU_Bypass)) { |
| 2478 | ss_tlb_t * tlbp; |
| 2479 | tlb_entry_t * tep; |
| 2480 | tlb_entry_t te_copy; |
| 2481 | uint_t idx, partid; |
| 2482 | ss_trap_type_t miss_trap_type; |
| 2483 | uint_t context; |
| 2484 | uint_t miss_context; |
| 2485 | |
| 2486 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: performing TLB access \n"); ); |
| 2487 | |
| 2488 | /* If not priv mode and mmu is off, translate real addresses */ |
| 2489 | if (!nsp->dmmu.enabled) |
| 2490 | context = SS_TLB_REAL_CONTEXT; |
| 2491 | else { |
| 2492 | /* figure out the context value */ |
| 2493 | switch (context_type) { |
| 2494 | case ss_ctx_primary: |
| 2495 | context = nsp->pri_context; |
| 2496 | break; |
| 2497 | case ss_ctx_secondary: |
| 2498 | context = nsp->sec_context; |
| 2499 | break; |
| 2500 | case ss_ctx_nucleus: |
| 2501 | if (mflags & MF_TLB_Real_Ctx) |
| 2502 | context = SS_TLB_REAL_CONTEXT; |
| 2503 | else |
| 2504 | context = SS_NUCLEUS_CONTEXT; |
| 2505 | break; |
| 2506 | default: |
| 2507 | fatal("ss_memory_asi_access: Internal Error. Not expecting " |
| 2508 | "context type 0x%x\n", context_type); |
| 2509 | } |
| 2510 | |
| 2511 | } |
| 2512 | /* |
| 2513 | * check out of range address (if lie within the "VA hole" |
| 2514 | * or "RA hole") |
| 2515 | */ |
| 2516 | if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) { |
| 2517 | niagara_set_sfsr(sp, &nsp->dmmu, va, |
| 2518 | MMU_SFSR_FT_VARANGE, context_type, |
| 2519 | asi, 0/*fixme*/, 1); |
| 2520 | v9p->post_precise_trap(sp, |
| 2521 | (sparcv9_trap_type_t)SS_trap_data_access_exception); |
| 2522 | return; |
| 2523 | } |
| 2524 | |
| 2525 | partid = nsp->partid; |
| 2526 | |
| 2527 | /* FIXME: Need a better hash than this ! */ |
| 2528 | idx = va >> SS_MAX_PAGE_SIZE_BITS; |
| 2529 | idx += context + partid; |
| 2530 | idx &= SS_TLB_HASH_MASK; |
| 2531 | tlbp = nsp->dtlbp; |
| 2532 | RW_rdlock(&tlbp->rwlock); |
| 2533 | /* |
| 2534 | * So we search for a matching page using the info we have in the |
| 2535 | * hash - while another thread might possibly be removing or |
| 2536 | * inserting an entry into the same table. |
| 2537 | */ |
| 2538 | |
| 2539 | |
| 2540 | for ( tep = tlbp->hash[idx].ptr; tep!=(tlb_entry_t*)0; tep = tep->nextp ) { |
| 2541 | /* try and match the entry as appropriate */ |
| 2542 | if (((tep->tag_pfn ^ va)>>tep->match_shift)==0 && tep->match_context==context && tep->partid == partid) { |
| 2543 | goto tlb_match; |
| 2544 | } |
| 2545 | } |
| 2546 | RW_unlock(&tlbp->rwlock); |
| 2547 | |
| 2548 | DBGMISS( lprintf(sp->gid, "dtlb miss: pc=%lx asi=%x va=%lx ctx=%x\n", sp->pc, asi, va, context); ); |
| 2549 | |
| 2550 | /* Based on the ASI access type setup accordingly */ |
| 2551 | switch (asi) { |
| 2552 | case SS_ASI_REAL_MEM: |
| 2553 | case SS_ASI_REAL_IO: |
| 2554 | case SS_ASI_REAL_MEM_LITTLE: |
| 2555 | case SS_ASI_REAL_IO_LITTLE: |
| 2556 | case SS_ASI_QUAD_LDD_REAL: |
| 2557 | case SS_ASI_QUAD_LDD_REAL_LITTLE: |
| 2558 | VA48_WARNING(sp, va); |
| 2559 | SET_DTLB_FAULT( nsp, VA48(va) ); |
| 2560 | nsp->dmmu.tag_access_reg = VA48(va) & ~MASK64(12,0); /* Do this properly later - FIXME */ |
| 2561 | DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); ); |
| 2562 | MEMORY_ACCESS_TRAP(); |
| 2563 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_data_real_translation_miss); |
| 2564 | break; |
| 2565 | |
| 2566 | default: |
| 2567 | /* |
| 2568 | * If the MMU is "disabled" in privileged mode ... this is a real miss, not a |
| 2569 | * virtual translation miss, so the fault context and trap type is different |
| 2570 | */ |
| 2571 | if (nsp->dmmu.enabled) { |
| 2572 | miss_context = context; |
| 2573 | miss_trap_type = SS_trap_fast_data_access_MMU_miss; |
| 2574 | } else { |
| 2575 | miss_context = 0; /* null for ra->pa miss undefined ? */ |
| 2576 | miss_trap_type = SS_trap_data_real_translation_miss; |
| 2577 | } |
| 2578 | nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | miss_context; /* Do this properly later - FIXME */ |
| 2579 | tlb_trap:; |
| 2580 | VA48_WARNING(sp, va); |
| 2581 | SET_DTLB_FAULT( nsp, VA48(va) ); |
| 2582 | DBGMMU( lprintf(sp->gid, "DMMU tag access = 0x%llx\n", nsp->dmmu.tag_access_reg); ); |
| 2583 | MEMORY_ACCESS_TRAP(); |
| 2584 | v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type); |
| 2585 | break; |
| 2586 | } |
| 2587 | return; |
| 2588 | |
| 2589 | tlb_match:; |
| 2590 | /* we have a matching entry ... now all we have to worry about are the permissions */ |
| 2591 | flags = tep->flags; |
| 2592 | pa += tep->pa_offset; |
| 2593 | pa_tag += tep->pa_offset; |
| 2594 | |
| 2595 | RW_unlock(&tlbp->rwlock); |
| 2596 | |
| 2597 | /* |
| 2598 | * Errors on dtlb hit: stash table_entry pointer and if |
| 2599 | * subsequent itlb hit on same entry post error again. |
| 2600 | */ |
| 2601 | #if ERROR_INJECTION |
| 2602 | if (sp->error_check == true && errorp->check_dtlb) { |
| 2603 | bool_t is_load, is_store; |
| 2604 | |
| 2605 | is_load = IS_V9_MA_LOAD(op); |
| 2606 | is_store = IS_V9_MA_STORE(op); |
| 2607 | |
| 2608 | if (is_load) ep = find_errconf(sp, LD, DMDU); |
| 2609 | else |
| 2610 | if (is_store) ep = find_errconf(sp, ST, DMSU); |
| 2611 | |
| 2612 | if (ep) { |
| 2613 | if (errorp->dtep) { |
| 2614 | DBGERR( lprintf(sp->gid, "ss_memory_asi_access: " |
| 2615 | "errorp->dtep=%x, tep=%x\n", |
| 2616 | errorp->dtep,tep); ); |
| 2617 | if ((tlb_entry_t *)errorp->dtep == tep) { |
| 2618 | ss_error_condition(sp, ep); |
| 2619 | return; |
| 2620 | } |
| 2621 | } else { |
| 2622 | errorp->dtep = tep; |
| 2623 | errorp->addr = va; |
| 2624 | ss_error_condition(sp, ep); |
| 2625 | return; |
| 2626 | } |
| 2627 | } |
| 2628 | } |
| 2629 | #endif |
| 2630 | |
| 2631 | /* privilege test apparently takes priority ... p.51 US-I PRM table 6-4 */ |
| 2632 | if ((flags & SS_TLB_FLAG_PRIV) && !(mflags & MF_Has_Priv)) { |
| 2633 | nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */ |
| 2634 | niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_PRIV, context_type, asi, 0/*fixme*/, 1); |
| 2635 | miss_trap_type = SS_trap_data_access_exception; |
| 2636 | goto tlb_trap; |
| 2637 | } |
| 2638 | |
| 2639 | /* |
| 2640 | * validate bits NFO, E and CP |
| 2641 | */ |
| 2642 | if ((flags & SS_TLB_FLAG_E) && (mflags & MF_No_Fault)) { |
| 2643 | nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */ |
| 2644 | miss_trap_type = SS_trap_data_access_exception; |
| 2645 | niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_SO, context_type, asi, 0/*fixme*/, 1); |
| 2646 | goto tlb_trap; |
| 2647 | } |
| 2648 | if ((flags & SS_TLB_FLAG_NFO) && (!(mflags & MF_No_Fault))) { |
| 2649 | nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */ |
| 2650 | miss_trap_type = SS_trap_data_access_exception; |
| 2651 | niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_NFO, context_type, asi, 0/*fixme*/, 1); |
| 2652 | goto tlb_trap; |
| 2653 | } |
| 2654 | if (!(flags & SS_TLB_FLAG_CP) && (mflags & MF_Atomic_Access)) { |
| 2655 | nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */ |
| 2656 | miss_trap_type = SS_trap_data_access_exception; |
| 2657 | niagara_set_sfsr(sp, &nsp->dmmu, va, MMU_SFSR_FT_ATOMICIO, context_type, asi, 0/*fixme*/, 1); |
| 2658 | goto tlb_trap; |
| 2659 | } |
| 2660 | |
| 2661 | if (IS_V9_MA_STORE(op) && !(flags & SS_TLB_FLAG_WRITE)) { |
| 2662 | uint64_t ps1, tte_ps1; |
| 2663 | nsp->dmmu.tag_access_reg = (VA48(va) & ~MASK64(12,0)) | ((SS_TLB_REAL_CONTEXT == context)?0:context); /* Do this properly later - FIXME */ |
| 2664 | ps1 = (context == 0) ? nsp->dmmu_ctxt_zero_tsb_ps1.page_size : nsp->dmmu_ctxt_nonzero_tsb_ps1.page_size; |
| 2665 | tte_ps1 = ((tep->data>>(48-2))&0x4) | ((tep->data>>61)&0x3); |
| 2666 | /* Is this the actual logic for direct TSB ptr select - FIXME */ |
| 2667 | /* State bit updated for data_access_protection - see PRM v1.0 p258 13.11.11 */ |
| 2668 | nsp->dmmu.tsb_direct_ps1 = (tte_ps1 == ps1); |
| 2669 | miss_trap_type = SS_trap_fast_data_access_protection; |
| 2670 | niagara_set_sfsr(sp, &nsp->dmmu, va, 0/*fixme*/, context_type, asi, 1, 0); |
| 2671 | goto tlb_trap; |
| 2672 | } |
| 2673 | |
| 2674 | mflags ^= (flags & SS_TLB_FLAG_IE) ? MF_Little_Endian : 0; |
| 2675 | |
| 2676 | perm_cache = (flags & SS_TLB_FLAG_WRITE) ? XDCACHE_WRITE_PERM : 0; |
| 2677 | perm_cache |= (flags & SS_TLB_FLAG_READ) ? XDCACHE_READ_PERM : 0; |
| 2678 | } else { |
| 2679 | /* Niagara only implements 40 bits of PA, the tlb code |
| 2680 | masks PA so here we need to mask bypass PAs */ |
| 2681 | pa &= MASK64(39,0); |
| 2682 | } |
| 2683 | |
| 2684 | /* |
| 2685 | * Now that we have the internal PA, map it to the real |
| 2686 | * external PA before looking it up in the domain. |
| 2687 | * This does not modify memory addresses, only JBus addresses. |
| 2688 | */ |
| 2689 | if (pa >= 0x800e000000ull && pa < 0x8010000000ull) { |
| 2690 | pa &= 0xffffffffull; |
| 2691 | pa |= 0x40000000000ull; |
| 2692 | } else if (pa >= 0x8010000000ull && pa < 0x8100000000ull) { |
| 2693 | pa &= 0x0ffffffffull; |
| 2694 | pa |= 0x60000000000ull; |
| 2695 | } else if (pa >= 0xc000000000ull && pa < 0xff00000000ull) { |
| 2696 | pa |= 0x70000000000ull; |
| 2697 | } |
| 2698 | /* |
| 2699 | * OK - now go get the pointer to the line data |
| 2700 | * ... start by finding the device that has the |
| 2701 | * memory we need. |
| 2702 | * optimise: by guessing at the last device found. |
| 2703 | */ |
| 2704 | |
| 2705 | /* now find the device - looking in the cache first */ |
| 2706 | |
| 2707 | cap = sp->xdc.miss_addrp; |
| 2708 | if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) { |
| 2709 | domain_t * domainp; |
| 2710 | config_proc_t * config_procp; |
| 2711 | |
| 2712 | config_procp = sp->config_procp; |
| 2713 | domainp = config_procp->domainp; |
| 2714 | |
| 2715 | cap = find_domain_address(domainp, pa); |
| 2716 | if (cap == NULL) { |
| 2717 | /* OK it's a bus error there was no backing store */ |
| 2718 | |
| 2719 | EXEC_WARNING(("bus error - (@pc=0x%llx, icount=%llu) access to va=0x%llx (pid=0x%x,ctx_type=0x%x,cacheline va=0x%llx -> physical 0x%llx)", sp->pc, ICOUNT(sp), va, nsp->partid, context_type, tag, pa_tag)); |
| 2720 | |
| 2721 | goto data_access_error; |
| 2722 | } |
| 2723 | } |
| 2724 | |
| 2725 | /* try and get the buffer pointer */ |
| 2726 | |
| 2727 | |
| 2728 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: calling dev_cacheable\n"); ); |
| 2729 | |
| 2730 | da = 0; |
| 2731 | if (IS_V9_MA_LOAD(op)) |
| 2732 | da |= DA_Load; |
| 2733 | if (IS_V9_MA_STORE(op)) |
| 2734 | da |= DA_Store; |
| 2735 | |
| 2736 | extent = cap->config_devp->dev_typep->dev_cacheable(cap, da, |
| 2737 | pa_tag-cap->baseaddr, &bufp); |
| 2738 | |
| 2739 | if (extent < XDCACHE_LINE_SIZE) { |
| 2740 | bool_t status; |
| 2741 | uint_t pio_op; |
| 2742 | uint64_t tempreg, *aregp; |
| 2743 | |
| 2744 | pio_op = memop & MA_Op_Mask; |
| 2745 | |
| 2746 | if ((MF_Little_Endian & mflags) && (pio_op == MA_St)) { |
| 2747 | tempreg = sparcv9_invert_endianess(regp, (1 << size)); |
| 2748 | aregp = &tempreg; |
| 2749 | } else if ((&(sp->intreg[Reg_sparcv9_g0]) == regp) && |
| 2750 | ((pio_op == MA_Ld) || (pio_op == MA_LdSigned))) { |
| 2751 | aregp = &tempreg; |
| 2752 | } else { |
| 2753 | aregp = regp; |
| 2754 | } |
| 2755 | |
| 2756 | status = cap->config_devp->dev_typep->dev_cpu_access(sp, cap, pa-cap->baseaddr, memop, aregp); |
| 2757 | |
| 2758 | if ((MF_Little_Endian & mflags) && status && (pio_op == MA_Ld || pio_op == MA_LdSigned)) { |
| 2759 | *regp = sparcv9_invert_endianess(regp, (1 << size)); |
| 2760 | if (pio_op == MA_LdSigned) { |
| 2761 | uint32_t shift; |
| 2762 | |
| 2763 | shift = 64 - (8 << size); |
| 2764 | *regp = ((sint64_t)(*regp << shift)) >> shift; |
| 2765 | } |
| 2766 | } |
| 2767 | |
| 2768 | ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]); |
| 2769 | |
| 2770 | if (status) |
| 2771 | goto done; |
| 2772 | |
| 2773 | EXEC_WARNING(("bus error - (@pc=0x%llx) access to va=0x%llx " |
| 2774 | "(pid=0x%x,ctx_type=0x%x,physical 0x%llx)", |
| 2775 | sp->pc, va, nsp->partid, context_type, pa)); |
| 2776 | |
| 2777 | data_access_error:; |
| 2778 | #if !defined(NDEBUG) /* { */ |
| 2779 | do { |
| 2780 | config_proc_t * config_procp; |
| 2781 | config_procp = sp->config_procp; |
| 2782 | ss_dump_tlbs(config_procp, true); |
| 2783 | /* abort(); */ /* FIXME - no longer need this ? */ |
| 2784 | } while (0); |
| 2785 | #endif /* } */ |
| 2786 | MEMORY_ACCESS_TRAP(); |
| 2787 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: post_precise_trap \n"); ); |
| 2788 | |
| 2789 | v9p->post_precise_trap(sp, Sparcv9_trap_data_access_error); /* FIXME: right trap ? */ |
| 2790 | return; |
| 2791 | } |
| 2792 | |
| 2793 | #if ERROR_INJECTION /* { */ |
| 2794 | /* processor-wide checks for unhandled L2 and DRAM errors */ |
| 2795 | if (npp->error_check) { |
| 2796 | bool_t is_load, is_store, is_atomic; |
| 2797 | uint8_t bank; |
| 2798 | |
| 2799 | is_load = IS_V9_MA_LOAD(op); |
| 2800 | is_store = IS_V9_MA_STORE(op); |
| 2801 | is_atomic = IS_V9_MA_ATOMIC(op); |
| 2802 | |
| 2803 | /* check if direct-map mode displacement flushing the error cacheline */ |
| 2804 | l2p = npp->l2p; |
| 2805 | bank = (pa >> 6) & 0x3; |
| 2806 | if (l2p->control[bank] & L2_DMMODE) { |
| 2807 | if ((pa & L2_DM_MASK) == (npp->errorp->ldac_addr & L2_DM_MASK)) { |
| 2808 | npp->errorp->ldac_addr = NULL; |
| 2809 | ss_set_errcheck(npp); |
| 2810 | goto npp_err_done; |
| 2811 | } |
| 2812 | if ((pa & L2_DM_MASK) == (npp->errorp->ldau_addr & L2_DM_MASK)) { |
| 2813 | npp->errorp->ldac_addr = NULL; |
| 2814 | ss_set_errcheck(npp); |
| 2815 | goto npp_err_done; |
| 2816 | } |
| 2817 | } |
| 2818 | |
| 2819 | /* |
| 2820 | * when accessing cacheline with error: load or partial store |
| 2821 | * causes LDAC or LDAU, store to line with correctible error clears it, |
| 2822 | * store to uncorrectible causes a writeback error |
| 2823 | */ |
| 2824 | if (pa == npp->errorp->ldac_addr) { |
| 2825 | if (is_load || |
| 2826 | (is_store && (size == MA_Size8 || size == MA_Size16))) { |
| 2827 | ep = new_errconf((is_load ? LD : ST), LDAC); |
| 2828 | ep->npp = true; |
| 2829 | goto lda_err; |
| 2830 | } else if (is_store) { |
| 2831 | npp->errorp->ldac_addr = NULL; |
| 2832 | ss_set_errcheck(npp); |
| 2833 | } |
| 2834 | } else if (pa = npp->errorp->ldau_addr) { |
| 2835 | if (is_load || |
| 2836 | (is_store && (size == MA_Size8 || size == MA_Size16))) { |
| 2837 | ep = new_errconf((is_load ? LD : ST), LDAU); |
| 2838 | ep->npp = true; |
| 2839 | goto lda_err; |
| 2840 | } else if (is_store) { |
| 2841 | npp->errorp->ldau_addr = NULL; |
| 2842 | ss_set_errcheck(npp); |
| 2843 | } |
| 2844 | } |
| 2845 | } |
| 2846 | |
| 2847 | npp_err_done: |
| 2848 | |
| 2849 | /* now check for errors to be generated from this thread's error list */ |
| 2850 | if (sp->error_check && errorp->check_xdcache) { |
| 2851 | bool_t is_load, is_store, is_atomic; |
| 2852 | uint8_t bank; |
| 2853 | xicache_t * xicp; |
| 2854 | xicache_instn_t * xip; |
| 2855 | uint64_t xidx; |
| 2856 | tvaddr_t xpc; |
| 2857 | |
| 2858 | is_load = IS_V9_MA_LOAD(op); |
| 2859 | is_store = IS_V9_MA_STORE(op); |
| 2860 | is_atomic = IS_V9_MA_ATOMIC(op); |
| 2861 | |
| 2862 | if (is_load) ep = find_errconf(sp, LD, |
| 2863 | (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU)); |
| 2864 | else |
| 2865 | if (is_store) ep = find_errconf(sp, ST, |
| 2866 | (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU)); |
| 2867 | |
| 2868 | if (ep) |
| 2869 | switch(ep->type) { |
| 2870 | case IRC: |
| 2871 | case IRU: |
| 2872 | case FRC: |
| 2873 | case FRU: |
| 2874 | xicp = sp->xicachep; |
| 2875 | xpc = sp->pc; |
| 2876 | xidx = (xpc>>2) & XICACHE_NUM_INSTR_MASK; |
| 2877 | xip = &xicp->instn[xidx]; |
| 2878 | errorp->reg = X_RS1(xip->rawi); |
| 2879 | ss_error_condition(sp, ep); |
| 2880 | return; |
| 2881 | case DTC: |
| 2882 | case DDC: |
| 2883 | errorp->addr = pa; |
| 2884 | ss_error_condition(sp, ep); |
| 2885 | return; |
| 2886 | lda_err: case LDAU: |
| 2887 | case LDAC: |
| 2888 | l2p = npp->l2p; |
| 2889 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 2890 | if (l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 2891 | } |
| 2892 | if (is_load) { |
| 2893 | if (is_atomic) errorp->l2_write = L2_RW_bit; |
| 2894 | errorp->addr = pa; |
| 2895 | ss_error_condition(sp, ep); |
| 2896 | return; |
| 2897 | } else |
| 2898 | if (is_store && (size == MA_Size8 || size == MA_Size16)) { |
| 2899 | errorp->l2_write = L2_RW_bit; |
| 2900 | errorp->partial_st = true; |
| 2901 | errorp->addr = pa; |
| 2902 | ss_error_condition(sp, ep); |
| 2903 | return; |
| 2904 | } |
| 2905 | break; |
| 2906 | |
| 2907 | ldw_err: case LDWU: |
| 2908 | case LDWC: |
| 2909 | l2p = npp->l2p; |
| 2910 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 2911 | if (l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 2912 | } |
| 2913 | if (is_store) { |
| 2914 | errorp->addr = pa; |
| 2915 | ss_error_condition(sp, ep); |
| 2916 | return; |
| 2917 | } |
| 2918 | break; |
| 2919 | |
| 2920 | case DAC: |
| 2921 | l2p = npp->l2p; |
| 2922 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 2923 | if (l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 2924 | } |
| 2925 | if (ep->op == LD && is_load) { |
| 2926 | if (is_atomic) errorp->l2_write = L2_RW_bit; |
| 2927 | errorp->addr = pa; |
| 2928 | ss_error_condition(sp, ep); |
| 2929 | return; |
| 2930 | } else |
| 2931 | if (ep->op == ST && is_store) { |
| 2932 | if (size == MA_Size8 || size == MA_Size16) |
| 2933 | errorp->partial_st = true; |
| 2934 | errorp->l2_write = L2_RW_bit; |
| 2935 | errorp->addr = pa; |
| 2936 | ss_error_condition(sp, ep); |
| 2937 | return; |
| 2938 | } |
| 2939 | break; |
| 2940 | |
| 2941 | case DAU: |
| 2942 | l2p = npp->l2p; |
| 2943 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 2944 | if (l2p->control[bank] & L2_DIS) goto l2_disabled; |
| 2945 | } |
| 2946 | if (ep->op == LD && is_load) { |
| 2947 | if (is_atomic) errorp->l2_write = L2_RW_bit; |
| 2948 | errorp->addr = pa; |
| 2949 | ss_error_condition(sp, ep); |
| 2950 | return; |
| 2951 | } else |
| 2952 | if (ep->op == ST && is_store) { |
| 2953 | if (size == MA_Size8 || size == MA_Size16) |
| 2954 | errorp->partial_st = true; |
| 2955 | errorp->l2_write = L2_RW_bit; |
| 2956 | errorp->addr = pa; |
| 2957 | ss_error_condition(sp, ep); |
| 2958 | return; |
| 2959 | } |
| 2960 | break; |
| 2961 | |
| 2962 | l2_disabled: DBGERR( lprintf(sp->gid, "ss_memory_asi_access: " |
| 2963 | "No LDAC/LDWC/LDAU/LDWU/DAC Error - L2 disabled\n"); ); |
| 2964 | break; |
| 2965 | } |
| 2966 | } |
| 2967 | #endif /* } */ |
| 2968 | |
| 2969 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: handling cacheable device memory\n"); ); |
| 2970 | |
| 2971 | /* |
| 2972 | * Now handle cacheable device memory |
| 2973 | * |
| 2974 | * Because we implicitly assume that the xdc uses the current context |
| 2975 | * we only add missed entries to the xdc iff it was a normal memory op |
| 2976 | */ |
| 2977 | |
| 2978 | if ((mflags & (MF_Normal|MF_Little_Endian)) == MF_Normal) { |
| 2979 | long ridx; |
| 2980 | xdcache_line_t * xclp; |
| 2981 | |
| 2982 | sp->xdc.miss_addrp = cap; /* cache for next time */ |
| 2983 | |
| 2984 | ridx = (va >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK; |
| 2985 | xclp = (xdcache_line_t *)(((uint8_t*)&(sp->xdc.line[0])) + ridx); |
| 2986 | /* only cache if memory is cacheable */ |
| 2987 | /* fill in the line */ |
| 2988 | /* WARNING: This tag may be a full 64bit value even if pstate.am=1 */ |
| 2989 | /* do not use ea_offset with anything else other than tag */ |
| 2990 | xclp->tag = tag | perm_cache | sp->tagstate; |
| 2991 | xclp->offset = ((uint64_t)bufp) - tag; |
| 2992 | } |
| 2993 | |
| 2994 | /* |
| 2995 | * Sigh now complete the load/store on behalf of the original |
| 2996 | * load instruction |
| 2997 | */ |
| 2998 | |
| 2999 | #if HOST_CPU_LITTLE_ENDIAN |
| 3000 | /* temporary hack */ |
| 3001 | mflags ^= MF_Little_Endian; |
| 3002 | #endif |
| 3003 | |
| 3004 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: completing load/store on behalf of original instr.\n"); ); |
| 3005 | |
| 3006 | ptr = (uint8_t*)(bufp + (pa & XDCACHE_LINE_OFFSET_MASK) ); |
| 3007 | |
| 3008 | switch (op) { |
| 3009 | uint64_t val, cval; |
| 3010 | |
| 3011 | case MA_Ld: |
| 3012 | switch (size) { |
| 3013 | case MA_Size8: |
| 3014 | val = *(uint8_t*)ptr; |
| 3015 | break; |
| 3016 | case MA_Size16: |
| 3017 | val = *(uint16_t*)ptr; |
| 3018 | break; |
| 3019 | case MA_Size32: |
| 3020 | val = *(uint32_t*)ptr; |
| 3021 | break; |
| 3022 | case MA_Size64: |
| 3023 | val = *(uint64_t*)ptr; |
| 3024 | break; |
| 3025 | default: |
| 3026 | abort(); |
| 3027 | } |
| 3028 | if (MF_Little_Endian & mflags) { |
| 3029 | DBGLE( lprintf(sp->gid, "SunSPARC::: MA_Ld with LE - val=0x%llx count=0x%x\n", |
| 3030 | val, (1 << size)); ); |
| 3031 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3032 | } |
| 3033 | goto complete_load; |
| 3034 | |
| 3035 | case MA_LdSigned: |
| 3036 | switch (size) { |
| 3037 | case MA_Size8: |
| 3038 | val = *(sint8_t*)ptr; |
| 3039 | break; |
| 3040 | case MA_Size16: |
| 3041 | val = *(sint16_t*)ptr; |
| 3042 | break; |
| 3043 | case MA_Size32: |
| 3044 | val = *(sint32_t*)ptr; |
| 3045 | break; |
| 3046 | default: |
| 3047 | abort(); |
| 3048 | } |
| 3049 | if (MF_Little_Endian & mflags) { |
| 3050 | uint32_t shift; |
| 3051 | |
| 3052 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_LdSigned with LE - val=0x%llx count=0x%x\n", |
| 3053 | val, (1 << size)); ); |
| 3054 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3055 | shift = 64 - (8 << size); |
| 3056 | val = ((sint64_t)(val << shift)) >> shift; |
| 3057 | } |
| 3058 | goto complete_load; |
| 3059 | |
| 3060 | case MA_St: |
| 3061 | if (MF_Little_Endian & mflags) { |
| 3062 | DBGLE( lprintf(sp->gid, "SunSPARC::: MA_St with LE - val=0x%llx\n", *regp); ); |
| 3063 | val = sparcv9_invert_endianess(regp, (1 << size)); |
| 3064 | } else { |
| 3065 | val = *regp; |
| 3066 | } |
| 3067 | if (mflags & MF_Blk_Init) { |
| 3068 | /* If line in L2 cache, leave data alone, otherwise zero it */ |
| 3069 | /* XXX How to simulate? */ |
| 3070 | ((uint64_t*)ptr)[0] = 0; |
| 3071 | ((uint64_t*)ptr)[1] = 0; |
| 3072 | ((uint64_t*)ptr)[2] = 0; |
| 3073 | ((uint64_t*)ptr)[3] = 0; |
| 3074 | ((uint64_t*)ptr)[4] = 0; |
| 3075 | ((uint64_t*)ptr)[5] = 0; |
| 3076 | ((uint64_t*)ptr)[6] = 0; |
| 3077 | ((uint64_t*)ptr)[7] = 0; |
| 3078 | } |
| 3079 | switch (size) { |
| 3080 | case MA_Size8: |
| 3081 | *(uint8_t*)ptr = val; |
| 3082 | break; |
| 3083 | case MA_Size16: |
| 3084 | *(uint16_t*)ptr = val; |
| 3085 | break; |
| 3086 | case MA_Size32: |
| 3087 | *(uint32_t*)ptr = val; |
| 3088 | break; |
| 3089 | case MA_Size64: |
| 3090 | *(uint64_t*)ptr = val; |
| 3091 | break; |
| 3092 | default: |
| 3093 | abort(); |
| 3094 | } |
| 3095 | break; |
| 3096 | |
| 3097 | case MA_LdFloat: |
| 3098 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdFloat with LE - \n"); ); |
| 3099 | |
| 3100 | ASSERT(&(sp->intreg[Reg_sparcv9_g0]) != regp); |
| 3101 | switch (size) { |
| 3102 | case MA_Size32: |
| 3103 | if (MF_Little_Endian & mflags) { |
| 3104 | val = *(ieee_fp32_t*)ptr; |
| 3105 | *(ieee_fp32_t*)regp = |
| 3106 | sparcv9_invert_endianess(&val, |
| 3107 | sizeof (ieee_fp32_t)); |
| 3108 | } else |
| 3109 | *(ieee_fp32_t*)regp = *(ieee_fp32_t*)ptr; |
| 3110 | break; |
| 3111 | case MA_Size64: |
| 3112 | if (MF_Little_Endian & mflags) |
| 3113 | *(ieee_fp64_t*)regp = |
| 3114 | sparcv9_invert_endianess( |
| 3115 | (uint64_t *)ptr, |
| 3116 | sizeof (ieee_fp64_t)); |
| 3117 | else |
| 3118 | *(ieee_fp64_t*)regp = *(ieee_fp64_t*)ptr; |
| 3119 | break; |
| 3120 | case MA_Size512: |
| 3121 | if ((MF_Little_Endian & mflags) == 0) { |
| 3122 | uint_t i; |
| 3123 | for (i = 0; i < 8; i++) { |
| 3124 | *(ieee_fp64_t*)(regp + i) = |
| 3125 | *(ieee_fp64_t*)(ptr + i*8); |
| 3126 | } |
| 3127 | } else { |
| 3128 | uint_t i; |
| 3129 | for (i = 0; i < 8; i++) { |
| 3130 | *(ieee_fp64_t*)(regp + i) = |
| 3131 | sparcv9_invert_endianess( |
| 3132 | (uint64_t *)(ptr + i*8), |
| 3133 | sizeof (ieee_fp64_t)); |
| 3134 | } |
| 3135 | } |
| 3136 | break; |
| 3137 | #ifdef PROCESSOR_SUPPORTS_QUADFP /* { */ |
| 3138 | case MA_Size128: |
| 3139 | ASSERT((MF_Little_Endian & mflags) == 0); |
| 3140 | *(ieee_fp128_t*)regp = *(ieee_fp128_t*)ptr; |
| 3141 | break; |
| 3142 | #endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */ |
| 3143 | default: |
| 3144 | abort(); |
| 3145 | } |
| 3146 | goto done; |
| 3147 | |
| 3148 | case MA_StFloat: |
| 3149 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_StFloat with LE - \n"); ); |
| 3150 | |
| 3151 | switch (size) { |
| 3152 | case MA_Size32: |
| 3153 | if (MF_Little_Endian & mflags) { |
| 3154 | val = *(ieee_fp32_t*)regp; |
| 3155 | *(ieee_fp32_t*)ptr = |
| 3156 | sparcv9_invert_endianess(&val, |
| 3157 | sizeof (ieee_fp32_t)); |
| 3158 | } else |
| 3159 | *(ieee_fp32_t*)ptr = *(ieee_fp32_t*)regp; |
| 3160 | break; |
| 3161 | case MA_Size64: |
| 3162 | if (MF_Little_Endian & mflags) |
| 3163 | *(ieee_fp64_t*)ptr = |
| 3164 | sparcv9_invert_endianess(regp, |
| 3165 | sizeof (ieee_fp64_t)); |
| 3166 | else |
| 3167 | *(ieee_fp64_t*)ptr = *(ieee_fp64_t*)regp; |
| 3168 | break; |
| 3169 | case MA_Size512: |
| 3170 | if ((MF_Little_Endian & mflags) == 0) { |
| 3171 | uint_t i; |
| 3172 | for (i = 0; i < 8; i++) { |
| 3173 | *(ieee_fp64_t*)(ptr + i*8) = |
| 3174 | *(ieee_fp64_t*)(regp + i); |
| 3175 | } |
| 3176 | } else { |
| 3177 | uint_t i; |
| 3178 | for (i = 0; i < 8; i++) { |
| 3179 | *(ieee_fp64_t*)(ptr + i*8) = |
| 3180 | sparcv9_invert_endianess( |
| 3181 | (regp + i), |
| 3182 | sizeof (ieee_fp64_t)); |
| 3183 | } |
| 3184 | } |
| 3185 | break; |
| 3186 | #ifdef PROCESSOR_SUPPORTS_QUADFP /* { */ |
| 3187 | case MA_Size128: |
| 3188 | ASSERT((MF_Little_Endian & mflags) == 0); |
| 3189 | *(ieee_fp128_t*)ptr = *(ieee_fp128_t*)regp; |
| 3190 | break; |
| 3191 | #endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */ |
| 3192 | default: |
| 3193 | abort(); |
| 3194 | } |
| 3195 | goto done; |
| 3196 | |
| 3197 | case MA_LdSt: |
| 3198 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_LdSt with LE - \n"); ); |
| 3199 | switch (size) { |
| 3200 | case MA_Size8: |
| 3201 | val = host_ldstub(ptr, reg2, *regp); |
| 3202 | break; |
| 3203 | default: |
| 3204 | abort(); |
| 3205 | } |
| 3206 | goto complete_load; |
| 3207 | |
| 3208 | case MA_Swap: |
| 3209 | |
| 3210 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_Swap with LE - \n"); ); |
| 3211 | |
| 3212 | if (MF_Little_Endian & mflags) { |
| 3213 | val = sparcv9_invert_endianess(regp, (1 << size)); |
| 3214 | } else { |
| 3215 | val = *regp; |
| 3216 | } |
| 3217 | switch (size) { |
| 3218 | case MA_Size32: |
| 3219 | val = host_swap((uint32_t *)ptr, val); |
| 3220 | break; |
| 3221 | default: |
| 3222 | abort(); |
| 3223 | } |
| 3224 | if (MF_Little_Endian & mflags) { |
| 3225 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3226 | } |
| 3227 | goto complete_load; |
| 3228 | |
| 3229 | case MA_CAS: |
| 3230 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: MA_CAS with LE - \n"); ); |
| 3231 | if (MF_Little_Endian & mflags) { |
| 3232 | val = sparcv9_invert_endianess(regp, (1 << size)); |
| 3233 | cval = sparcv9_invert_endianess(®2, (1 << size)); |
| 3234 | } else { |
| 3235 | val = *regp; |
| 3236 | cval = reg2; |
| 3237 | } |
| 3238 | switch (size) { |
| 3239 | case MA_Size32: |
| 3240 | val = host_cas32((uint32_t *)ptr, cval, val); |
| 3241 | break; |
| 3242 | case MA_Size64: |
| 3243 | val = host_cas64((uint64_t *)ptr, cval, val); |
| 3244 | break; |
| 3245 | default: |
| 3246 | abort(); |
| 3247 | } |
| 3248 | if (MF_Little_Endian & mflags) { |
| 3249 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3250 | } |
| 3251 | goto complete_load; |
| 3252 | |
| 3253 | complete_load: |
| 3254 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 3255 | *regp = val; |
| 3256 | break; |
| 3257 | |
| 3258 | case MA_LdDouble: |
| 3259 | switch (size) { |
| 3260 | case MA_Size64: /* standard sparc LDD instruction */ |
| 3261 | val = *(uint64_t *)ptr; |
| 3262 | regp[0] = (uint32_t)(val >> 32); |
| 3263 | regp[1] = (uint32_t)val; |
| 3264 | if (MF_Little_Endian & mflags) { |
| 3265 | DBGLE( lprintf(sp->gid, "SunSPARC::: MA_LdDouble with LE - val=0x%llx count=0x%x\n", |
| 3266 | val, (1 << size)); ); |
| 3267 | regp[0] = sparcv9_invert_endianess(®p[0], (1 << size)>>1); |
| 3268 | regp[1] = sparcv9_invert_endianess(®p[1], (1 << size)>>1); |
| 3269 | } |
| 3270 | sp->intreg[Reg_sparcv9_g0] = 0; /* regp might be %g0 */ |
| 3271 | break; |
| 3272 | case MA_Size128: |
| 3273 | host_atomic_get128be((uint64_t *)ptr, regp, ®p[1]); |
| 3274 | if (MF_Little_Endian & mflags) { |
| 3275 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_ldDouble with LE - val=0x%llx,0x%llx count=0x%x\n", |
| 3276 | regp[0], regp[1], (1 << size)); ); |
| 3277 | regp[0] = sparcv9_invert_endianess(®p[0], (1 << size)>>1); |
| 3278 | regp[1] = sparcv9_invert_endianess(®p[1], (1 << size)>>1); |
| 3279 | } |
| 3280 | sp->intreg[Reg_sparcv9_g0] = 0; /* regp might be %g0 */ |
| 3281 | break; |
| 3282 | default: |
| 3283 | fatal("ss_memory_asi_access: internal error - " |
| 3284 | "illegal size for MA_LdDouble"); |
| 3285 | } |
| 3286 | break; |
| 3287 | |
| 3288 | case MA_StDouble: |
| 3289 | { |
| 3290 | uint32_t reven; |
| 3291 | uint32_t rodd; |
| 3292 | ASSERT(size == MA_Size64); |
| 3293 | if (MF_Little_Endian & mflags) { |
| 3294 | DBGLE(lprintf(sp->gid, "SunSPARC::: MA_StDouble with LE - reven=0x%x rodd=0x%x count=0x%x\n", |
| 3295 | (uint32_t)regp[0], (uint32_t)regp[1], (1 << size)); ); |
| 3296 | reven = (uint32_t)sparcv9_invert_endianess(®p[0], (1 << size)>>1); |
| 3297 | rodd = (uint32_t)sparcv9_invert_endianess(®p[1], (1 << size)>>1); |
| 3298 | } else { |
| 3299 | reven = (uint32_t)regp[0]; |
| 3300 | rodd = (uint32_t)regp[1]; |
| 3301 | } |
| 3302 | val = ((uint64_t)reven << 32) | ((uint32_t)rodd); |
| 3303 | *(uint64_t *)ptr = val; |
| 3304 | } |
| 3305 | break; |
| 3306 | |
| 3307 | case MA_V9_LdFSR: |
| 3308 | ASSERT( MA_Size32 == size ); |
| 3309 | val = *(uint32_t*)ptr; |
| 3310 | if (MF_Little_Endian & mflags) |
| 3311 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3312 | v9_set_fsr_lower(sp, val); |
| 3313 | break; |
| 3314 | |
| 3315 | case MA_V9_LdXFSR: |
| 3316 | ASSERT( MA_Size64 == size ); |
| 3317 | val = *(uint64_t*)ptr; |
| 3318 | if (MF_Little_Endian & mflags) |
| 3319 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3320 | v9_set_fsr(sp, val); |
| 3321 | break; |
| 3322 | |
| 3323 | case MA_V9_StFSR: |
| 3324 | ASSERT( MA_Size32 == size ); |
| 3325 | val = v9_get_fsr(sp); |
| 3326 | if (MF_Little_Endian & mflags) |
| 3327 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3328 | *(uint32_t*)ptr = val & MASK64(31,0); |
| 3329 | /* FTT is cleared on read of FSR */ |
| 3330 | sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; |
| 3331 | DBGFSR( lprintf(sp->gid, "stfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), val); ); |
| 3332 | break; |
| 3333 | |
| 3334 | case MA_V9_StXFSR: |
| 3335 | ASSERT( MA_Size64 == size ); |
| 3336 | val = v9_get_fsr(sp); |
| 3337 | if (MF_Little_Endian & mflags) |
| 3338 | val = sparcv9_invert_endianess(&val, (1 << size)); |
| 3339 | *(uint64_t*)ptr = val; |
| 3340 | /* FTT is cleared on read of FSR */ |
| 3341 | sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK; |
| 3342 | DBGFSR( lprintf(sp->gid, "stxfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n", sp->pc, v9_get_fsr(sp), val); ); |
| 3343 | break; |
| 3344 | |
| 3345 | default: |
| 3346 | abort(); |
| 3347 | } |
| 3348 | |
| 3349 | done:; |
| 3350 | /* |
| 3351 | * Finally go get the next instruction |
| 3352 | */ |
| 3353 | |
| 3354 | DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid, "SunSPARC::: getting the next instr.\n"); ); |
| 3355 | |
| 3356 | NEXT_INSTN(sp); |
| 3357 | } |
| 3358 | |
| 3359 | |
| 3360 | /* |
| 3361 | * Insert the entry using the mmup->tag_access_reg and the supplied data field |
| 3362 | * into the TLB. Being careful of course to first invalidate any entries which |
| 3363 | * first conflict with the page we're tryinng to insert |
| 3364 | * |
| 3365 | * Returns false on failure, true on success ... failure implies |
| 3366 | * a data access exception for the caller - which it must generate. |
| 3367 | */ |
| 3368 | bool_t |
| 3369 | ss_tlb_insert(simcpu_t * sp, ss_mmu_t * mmup, ss_tlb_t * tlbp, uint_t partid, |
| 3370 | bool_t is_real, uint64_t data, uint_t idx) |
| 3371 | { |
| 3372 | tlb_entry_t * tep; |
| 3373 | tlb_entry_t te_copy; |
| 3374 | uint_t shift, size; |
| 3375 | tvaddr_t tag; |
| 3376 | uint16_t tag_context; |
| 3377 | matchcontext_t match_context; |
| 3378 | uint_t i; |
| 3379 | bool_t need_flush = false; |
| 3380 | |
| 3381 | |
| 3382 | /* FIXME: what does niagara do if you try to load an invalid TTE ? */ |
| 3383 | if (idx == SS_TLB_LRU && ((data >> SUN4U_TTED_V_BIT)&1) == 0) { |
| 3384 | EXEC_WARNING(("tlb_insert 0x%llx (@pc=0x%llx, icount=%llu) TTE invalid", data, sp->pc, ICOUNT(sp))); |
| 3385 | } |
| 3386 | |
| 3387 | size = ((data>>(48-2))&0x4) | ((data>>61)&0x3); |
| 3388 | |
| 3389 | /* figure out the useful info about our page to insert */ |
| 3390 | shift = SUN4V_PAGE_SIZE_SHIFT(size); |
| 3391 | if (shift == 0) return false; |
| 3392 | |
| 3393 | /* |
| 3394 | * This is VERY important: |
| 3395 | * The tag access register need NOT contain a correctly aligned tag entry |
| 3396 | * for the given page size. So it is REALLY IMPORTANT when forming the TLB |
| 3397 | * entry tag field that we correctly mask off the lower bits corresponding to |
| 3398 | * the selected page size. This especially important because we use this value to |
| 3399 | * compute a va-pa offset. |
| 3400 | * Note: we do a similar mask operation later when using the PA to compute the |
| 3401 | * offset value we create. |
| 3402 | */ |
| 3403 | |
| 3404 | tag = mmup->tag_access_reg & MASK64(63,shift); |
| 3405 | |
| 3406 | |
| 3407 | tag_context = mmup->tag_access_reg & MASK64(12,0); |
| 3408 | match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context; |
| 3409 | |
| 3410 | RW_wrlock(&tlbp->rwlock); |
| 3411 | |
| 3412 | /* |
| 3413 | * First lets look for potentially matching pages we may have to |
| 3414 | * de-map first. We demap the old entry if it incorporates our new |
| 3415 | * page, or vice-versa. |
| 3416 | */ |
| 3417 | |
| 3418 | tep = &(tlbp->tlb_entryp[0]); |
| 3419 | for (i=tlbp->nentries; i>0; i--, tep++) { |
| 3420 | tvaddr_t xor; |
| 3421 | |
| 3422 | if (tep->hashidx == -1) |
| 3423 | continue; |
| 3424 | |
| 3425 | xor = tep->tag_pfn ^ tag; |
| 3426 | |
| 3427 | if ( ( (xor>>tep->match_shift)==0 || (xor >> shift)==0 ) && |
| 3428 | tep->match_context == match_context && tep->partid == partid) { |
| 3429 | |
| 3430 | need_flush = true; |
| 3431 | /* matching entry - put back on the free list */ |
| 3432 | ss_tlb_unhash(tlbp, tep); |
| 3433 | ss_free_tlb_entry( tlbp, tep ); |
| 3434 | |
| 3435 | #if ERROR_INJECTION |
| 3436 | DBGERR( lprintf(sp->gid, "ss_tlb_insert(): errorp->itep=%x" |
| 3437 | " errorp->dtep=%x tep=%x\n", |
| 3438 | sp->errorp->itep, sp->errorp->dtep, tep); ); |
| 3439 | tlb_entry_error_match(sp, mmup, tep); |
| 3440 | #endif |
| 3441 | } |
| 3442 | } |
| 3443 | |
| 3444 | /* |
| 3445 | * Now we need to pick an entry for the one we wish |
| 3446 | * to insert |
| 3447 | */ |
| 3448 | |
| 3449 | if (idx != SS_TLB_LRU) { |
| 3450 | tep = &tlbp->tlb_entryp[idx]; |
| 3451 | if (tep->hashidx != -1) { |
| 3452 | need_flush = true; |
| 3453 | ss_tlb_unhash(tlbp, tep); |
| 3454 | } else |
| 3455 | ss_tlb_unfree(tlbp, tep); |
| 3456 | } else { |
| 3457 | tep = tlbp->freep; |
| 3458 | if (tep == (tlb_entry_t*)0) { |
| 3459 | |
| 3460 | /* OK replacement required - clobber a valid entry */ |
| 3461 | /* FIXME: What is Niagara's replacement policy ? */ |
| 3462 | #if SS_TLB_REPLACE_RANDOM /* { */ |
| 3463 | do { |
| 3464 | i = random() % tlbp->nentries; |
| 3465 | tep = &(tlbp->tlb_entryp[i]); |
| 3466 | } while (tep->flags & SS_TLB_FLAG_LOCKED); |
| 3467 | #elif SS_TLB_REPLACE_RROBIN /* } { */ |
| 3468 | i = tlbp->last_replaced; |
| 3469 | do { |
| 3470 | i = i+1; |
| 3471 | if (i>=tlbp->nentries) i=0; /* wrap */ |
| 3472 | tep = &(tlbp->tlb_entryp[i]); |
| 3473 | |
| 3474 | if (i==tlbp->last_replaced) { |
| 3475 | /* |
| 3476 | * if all entries are locked, replace the final TLB entry |
| 3477 | */ |
| 3478 | i = tlbp->nentries - 1; |
| 3479 | EXEC_WARNING(("all TLB entries are locked, the final TLB entry %d is replaced",i)); |
| 3480 | tep = &(tlbp->tlb_entryp[i]); |
| 3481 | break; |
| 3482 | } |
| 3483 | } while (tep->flags & SS_TLB_FLAG_LOCKED); |
| 3484 | tlbp->last_replaced = i; |
| 3485 | #else |
| 3486 | #error Need to define TLB replacement alg |
| 3487 | #endif /* } */ |
| 3488 | |
| 3489 | need_flush = true; |
| 3490 | /* put back on the free list */ |
| 3491 | ss_tlb_unhash(tlbp, tep); |
| 3492 | ss_free_tlb_entry( tlbp, tep ); |
| 3493 | tep = tlbp->freep; |
| 3494 | } |
| 3495 | |
| 3496 | /* free entry must be invalid ! */ |
| 3497 | ASSERT(!(tep->data&(1ull<<SUN4U_TTED_V_BIT))); |
| 3498 | |
| 3499 | tlbp->freep = tep->nextp; |
| 3500 | } |
| 3501 | |
| 3502 | /* create the new entry */ |
| 3503 | |
| 3504 | tep->is_real = is_real; |
| 3505 | tep->match_context = match_context; |
| 3506 | tep->partid = partid; |
| 3507 | tep->match_shift = shift; |
| 3508 | tep->tag_pfn = tag; |
| 3509 | tep->tag_context = tag_context; |
| 3510 | tep->data = data; |
| 3511 | /* Note: variable size mask again based on page size */ |
| 3512 | tep->pa_offset = (data & MASK64(39,shift)) - tag; |
| 3513 | |
| 3514 | DBGMMU( lprintf(sp->gid, "tlb_insert: %c-TLB: tte=%llx [ sz=0x%x l=%d cp=%d cv=%d e=%d p=%d w=%d ]\n", |
| 3515 | mmup->is_immu ? 'I' : 'D', data, |
| 3516 | size, |
| 3517 | (uint_t)((data>>6)&1LL), (uint_t)((data>>5)&1LL), |
| 3518 | (uint_t)((data>>4)&1LL), (uint_t)((data>>3)&1LL), |
| 3519 | (uint_t)((data>>2)&1LL), (uint_t)((data>>1)&1LL) |
| 3520 | ); |
| 3521 | lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx\n", |
| 3522 | partid, tag, tag_context, match_context, tep->pa_offset); ); |
| 3523 | |
| 3524 | /* niagara doesn't have read and exec bits */ |
| 3525 | if (mmup->is_immu) |
| 3526 | tep->flags = SS_TLB_FLAG_EXEC; |
| 3527 | else |
| 3528 | tep->flags = SS_TLB_FLAG_READ; |
| 3529 | if ( ((data>>1)&1) ) tep->flags |= SS_TLB_FLAG_WRITE; |
| 3530 | if ( ((data>>2)&1) ) tep->flags |= SS_TLB_FLAG_PRIV; |
| 3531 | if ( ((data>>6)&1) ) tep->flags |= SS_TLB_FLAG_LOCKED; |
| 3532 | if ( (data&BIT(39)) == 0 ) tep->flags |= SS_TLB_FLAG_CP; |
| 3533 | if ( ((data>>3)&1) ) tep->flags |= SS_TLB_FLAG_E; |
| 3534 | if ( ((data>>60)&1) ) tep->flags |= SS_TLB_FLAG_NFO; |
| 3535 | if ( ((data>>59)&1) ) tep->flags |= SS_TLB_FLAG_IE; |
| 3536 | |
| 3537 | /* Finally insert the new entry into the hash table for the TLB */ |
| 3538 | |
| 3539 | /* Hash uses match_context so it skews real->phys entries away from context 0 */ |
| 3540 | i = tag >> SS_MAX_PAGE_SIZE_BITS; |
| 3541 | i += match_context + partid; |
| 3542 | i &= SS_TLB_HASH_MASK; |
| 3543 | |
| 3544 | if (((data >> SUN4U_TTED_V_BIT)&1) != 0) { |
| 3545 | tep->hashidx = i; /* to help with unhooking later */ |
| 3546 | |
| 3547 | tep->nextp = tlbp->hash[i].ptr; |
| 3548 | tlbp->hash[i].ptr = tep; |
| 3549 | } else { |
| 3550 | tep->nextp = tlbp->freep; |
| 3551 | tlbp->freep = tep; |
| 3552 | } |
| 3553 | |
| 3554 | RW_unlock(&tlbp->rwlock); |
| 3555 | |
| 3556 | if (need_flush) { |
| 3557 | if (mmup->is_immu) |
| 3558 | sp->xicache_trans_flush_pending = true; |
| 3559 | else |
| 3560 | sp->xdcache_trans_flush_pending = true; |
| 3561 | if (tlbp->shares > 1) { |
| 3562 | ss_tlb_flush_shares(sp, tlbp, mmup->is_immu); |
| 3563 | } |
| 3564 | } |
| 3565 | |
| 3566 | return true; |
| 3567 | } |
| 3568 | |
| 3569 | |
| 3570 | /* |
| 3571 | * Dumb function to shuffle the sun4v TTE format into the sun4u |
| 3572 | * one used internally by Niagara. |
| 3573 | */ |
| 3574 | |
| 3575 | #define SHIFT_FIELD(_data, _hi,_lo,_new) (((((uint64_t)(_data))&MASK64(_hi,_lo))>>(_lo))<<(_new)) |
| 3576 | |
| 3577 | uint64_t niagara_shuffle_sun4v_format(uint64_t data) |
| 3578 | { |
| 3579 | uint64_t val; |
| 3580 | |
| 3581 | val = data & MASK64(63,63); /* valid bit */ |
| 3582 | |
| 3583 | val |= SHIFT_FIELD(data, 62,62, 60); /* NFO */ |
| 3584 | val |= SHIFT_FIELD(data, 61,61, 6); /* locked */ |
| 3585 | val |= SHIFT_FIELD(data, 39,13, 13); /* pa */ |
| 3586 | val |= SHIFT_FIELD(data, 12,12, 59); /* invert endianness */ |
| 3587 | val |= SHIFT_FIELD(data, 11,11, 3); /* side effect */ |
| 3588 | val |= SHIFT_FIELD(data, 10, 9, 4); /* cacheable bits */ |
| 3589 | val |= SHIFT_FIELD(data, 8, 8, 2); /* privileged */ |
| 3590 | val |= SHIFT_FIELD(data, 6, 6, 1); /* writeable */ |
| 3591 | val |= SHIFT_FIELD(data, 2, 2, 48); /* size[2] */ |
| 3592 | val |= SHIFT_FIELD(data, 1, 0, 61); /* size[1:0] */ |
| 3593 | |
| 3594 | return val; |
| 3595 | } |
| 3596 | |
| 3597 | |
| 3598 | /* |
| 3599 | * Pseudo devices |
| 3600 | */ |
| 3601 | #ifndef NDEBUG /* { */ |
| 3602 | char * ss_ssi_reg_name(uint_t reg) |
| 3603 | { |
| 3604 | char * s; |
| 3605 | switch (reg) { |
| 3606 | case NI_SSI_TIMEOUT: s="ssi_timeout"; break; |
| 3607 | case NI_SSI_LOG: s="ssi_log"; break; |
| 3608 | default: s="Illegal ssi register"; break; |
| 3609 | } |
| 3610 | |
| 3611 | return s; |
| 3612 | } |
| 3613 | |
| 3614 | char * ss_jbi_reg_name(uint_t reg) |
| 3615 | { |
| 3616 | char * s; |
| 3617 | switch (reg) { |
| 3618 | case NI_JBI_CONFIG1: s="jbi_config1"; break; |
| 3619 | case NI_JBI_CONFIG2: s="jbi_config2"; break; |
| 3620 | case NI_JBI_INT_MRGN: s="jbi_int_mrgn"; break; |
| 3621 | case NI_JBI_DEBUG: s="jbi_debug"; break; |
| 3622 | case NI_JBI_DEBUG_ARB: s="jbi_debug_arb"; break; |
| 3623 | case NI_JBI_PERF_CTL: s="jbi_perf_ctl"; break; |
| 3624 | case NI_JBI_PERF_CNT: s="jbi_perf_cnt"; break; |
| 3625 | case NI_JBI_ERR_INJECT: s="jbi_err_inject"; break; |
| 3626 | case NI_JBI_ERR_CONFIG: s="jbi_err_config"; break; |
| 3627 | case NI_JBI_ERROR_LOG: s="jbi_error_log"; break; |
| 3628 | case NI_JBI_ERROR_OVF: s="jbi_error_ovf"; break; |
| 3629 | case NI_JBI_LOG_ENB: s="jbi_log_enb"; break; |
| 3630 | case NI_JBI_SIG_ENB: s="jbi_sig_enb"; break; |
| 3631 | case NI_JBI_LOG_ADDR: s="jbi_log_addr"; break; |
| 3632 | case NI_JBI_LOG_CTRL: s="jbi_log_ctrl"; break; |
| 3633 | case NI_JBI_LOG_DATA0: s="jbi_log_data0"; break; |
| 3634 | case NI_JBI_LOG_DATA1: s="jbi_log_data1"; break; |
| 3635 | case NI_JBI_LOG_PAR: s="jbi_log_par"; break; |
| 3636 | case NI_JBI_LOG_NACK: s="jbi_log_nack"; break; |
| 3637 | case NI_JBI_LOG_ARB: s="jbi_log_arb"; break; |
| 3638 | case NI_JBI_L2_TIMEOUT: s="jbi_l2_timeout"; break; |
| 3639 | case NI_JBI_ARB_TIMEOUT: s="jbi_arb_timeout"; break; |
| 3640 | case NI_JBI_TRANS_TIMEOUT: s="jbi_trans_timeout"; break; |
| 3641 | case NI_JBI_INTR_TIMEOUT: s="jbi_intr_timeout"; break; |
| 3642 | case NI_JBI_MEMSIZE: s="jbi_memsize"; break; |
| 3643 | default: s="Illegal jbi register"; break; |
| 3644 | } |
| 3645 | |
| 3646 | return s; |
| 3647 | } |
| 3648 | |
| 3649 | char * ss_jbus_reg_name(uint_t reg) |
| 3650 | { |
| 3651 | char * s; |
| 3652 | switch (reg) { |
| 3653 | case NI_J_INT_DATA0: s="j_int_data0"; break; |
| 3654 | case NI_J_INT_DATA1: s="j_int_data1"; break; |
| 3655 | case NI_J_INT_ADATA0: s="j_int_adata0"; break; |
| 3656 | case NI_J_INT_ADATA1: s="j_int_adata1"; break; |
| 3657 | case NI_J_INT_BUSY: s="j_int_busy"; break; |
| 3658 | case NI_J_INT_ABUSY: s="j_int_abusy"; break; |
| 3659 | default: s="Illegal jbus register"; break; |
| 3660 | } |
| 3661 | |
| 3662 | return s; |
| 3663 | } |
| 3664 | |
| 3665 | char * ss_iob_reg_name(uint_t reg) |
| 3666 | { |
| 3667 | char * s; |
| 3668 | switch (reg) { |
| 3669 | case NI_INT_MAN0: s="int_man0"; break; |
| 3670 | case NI_INT_MAN1: s="int_man1"; break; |
| 3671 | case NI_INT_MAN2: s="int_man2"; break; |
| 3672 | case NI_INT_MAN3: s="int_man3"; break; |
| 3673 | case NI_INT_CTL0: s="int_ctl0"; break; |
| 3674 | case NI_INT_CTL1: s="int_ctl1"; break; |
| 3675 | case NI_INT_CTL2: s="int_ctl2"; break; |
| 3676 | case NI_INT_CTL3: s="int_ctl3"; break; |
| 3677 | case NI_INT_VEC_DIS: s="int_vec_dis"; break; |
| 3678 | case NI_J_INT_VEC: s="j_int_vec"; break; |
| 3679 | case NI_RSET_STAT: s="rset_stat"; break; |
| 3680 | case NI_TM_STAT_CTL: s="tm_stat_ctl"; break; |
| 3681 | case NI_PROC_SER_NUM: s="proc_ser_num"; break; |
| 3682 | case NI_CORE_AVAIL: s="core_avail"; break; |
| 3683 | case NI_IOB_FUSE: s="iob_fuse"; break; |
| 3684 | case NI_INT_MRGN_REG: s="int_mrgn_reg"; break; |
| 3685 | case NI_L2_VIS_CONTROL: s="l2_vis_control"; break; |
| 3686 | case NI_L2_VIS_MASK_A: s="l2_vis_mask_a"; break; |
| 3687 | case NI_L2_VIS_MASK_B: s="l2_vis_mask_b"; break; |
| 3688 | case NI_L2_VIS_COMPARE_A: s="l2_vis_compare_a"; break; |
| 3689 | case NI_L2_VIS_COMPARE_B: s="l2_vis_compare_b"; break; |
| 3690 | case NI_L2_TRIG_DELAY: s="l2_trig_delay"; break; |
| 3691 | case NI_IOB_VIS_SELECT: s="iob_vis_select"; break; |
| 3692 | case NI_DB_ENET_CONTROL: s="db_enet_control"; break; |
| 3693 | case NI_DB_ENET_IDLEVAL: s="db_enet_idleval"; break; |
| 3694 | case NI_DB_JBUS_CONTROL: s="db_jbus_control"; break; |
| 3695 | case NI_DB_JBUS_MASK0: s="db_jbus_mask0"; break; |
| 3696 | case NI_DB_JBUS_MASK1: s="db_jbus_mask1"; break; |
| 3697 | case NI_DB_JBUS_MASK2: s="db_jbus_mask2"; break; |
| 3698 | case NI_DB_JBUS_MASK3: s="db_jbus_mask3"; break; |
| 3699 | case NI_DB_JBUS_COMPARE0: s="db_jbus_compare0"; break; |
| 3700 | case NI_DB_JBUS_COMPARE1: s="db_jbus_compare1"; break; |
| 3701 | case NI_DB_JBUS_COMPARE2: s="db_jbus_compare2"; break; |
| 3702 | case NI_DB_JBUS_COMPARE3: s="db_jbus_compare3"; break; |
| 3703 | case NI_DB_JBUS_COUNT: s="db_jbus_count"; break; |
| 3704 | default: s="Illegal clock register"; break; |
| 3705 | } |
| 3706 | |
| 3707 | return s; |
| 3708 | } |
| 3709 | |
| 3710 | char * ss_clock_reg_name(uint_t reg) |
| 3711 | { |
| 3712 | char * s; |
| 3713 | switch (reg) { |
| 3714 | case SS_CLOCK_DIVIDER: s="divider"; break; |
| 3715 | case SS_CLOCK_CONTROL: s="control"; break; |
| 3716 | case SS_CLOCK_DLL_CONTROL: s="dll_control"; break; |
| 3717 | case SS_CLOCK_JBUS_SYNC: s="jbus_sync"; break; |
| 3718 | case SS_CLOCK_DLL_BYPASS: s="dll_bypass"; break; |
| 3719 | case SS_CLOCK_DRAM_SYNC: s="dram_sync"; break; |
| 3720 | case SS_CLOCK_VERSION: s="version"; break; |
| 3721 | default: s="Illegal clock register"; break; |
| 3722 | } |
| 3723 | |
| 3724 | return s; |
| 3725 | } |
| 3726 | |
| 3727 | char * ss_l2_ctrl_reg_name(uint_t reg) |
| 3728 | { |
| 3729 | char * s; |
| 3730 | switch (reg) { |
| 3731 | case SS_L2_DIAG_DATA: s="diag_data"; break; |
| 3732 | case SS_L2_DIAG_TAG: s="diag_tag"; break; |
| 3733 | case SS_L2_DIAG_VUAD: s="diag_vuad"; break; |
| 3734 | case SS_L2_CONTROL: s="control"; break; |
| 3735 | case SS_L2_ERROR_ENABLE: s="error_enable"; break; |
| 3736 | case SS_L2_ERROR_STATUS: s="error_status"; break; |
| 3737 | case SS_L2_ERROR_ADDRESS: s="error_address"; break; |
| 3738 | case SS_L2_ERROR_INJECT: s="error_inject"; break; |
| 3739 | default: s="Illegal L2 control register"; break; |
| 3740 | } |
| 3741 | |
| 3742 | return s; |
| 3743 | } |
| 3744 | |
| 3745 | char * ss_dram_ctrl_reg_name(uint_t reg) |
| 3746 | { |
| 3747 | char * s; |
| 3748 | switch (reg) { |
| 3749 | case SS_DRAM_CAS_ADDR_WIDTH: s="cas_addr_width"; break; |
| 3750 | case SS_DRAM_CAS_LAT: s="cas_lat"; break; |
| 3751 | case SS_DRAM_CHANNEL_DISABLED: s="channel_disabled"; break; |
| 3752 | case SS_DRAM_DBG_TRG_EN: s="dbg_trg_en"; break; |
| 3753 | case SS_DRAM_DIMM_INIT: s="dimm_init"; break; |
| 3754 | case SS_DRAM_DIMM_PRESENT: s="dimm_present"; break; |
| 3755 | case SS_DRAM_DIMM_STACK: s="dimm_stack"; break; |
| 3756 | case SS_DRAM_DRAM_TRCD: s="dram_trcd"; break; |
| 3757 | case SS_DRAM_ERROR_ADDRESS: s="error_address"; break; |
| 3758 | case SS_DRAM_ERROR_COUNTER: s="error_counter"; break; |
| 3759 | case SS_DRAM_ERROR_INJECT: s="error_inject"; break; |
| 3760 | case SS_DRAM_ERROR_LOCATION: s="error_location"; break; |
| 3761 | case SS_DRAM_ERROR_STATUS: s="error_status"; break; |
| 3762 | case SS_DRAM_EXT_WR_MODE1: s="ext_wr_mode1"; break; |
| 3763 | case SS_DRAM_EXT_WR_MODE2: s="ext_wr_mode2"; break; |
| 3764 | case SS_DRAM_EXT_WR_MODE3: s="ext_wr_mode3"; break; |
| 3765 | case SS_DRAM_FAILOVER_MASK: s="failover_mask"; break; |
| 3766 | case SS_DRAM_FAILOVER_STATUS: s="failover_status"; break; |
| 3767 | case SS_DRAM_HW_DMUX_CLK_INV: s="hw_dmux_clk_inv"; break; |
| 3768 | case SS_DRAM_INIT_STATUS: s="init_status"; break; |
| 3769 | case SS_DRAM_MODE_WRITE_STATUS: s="mode_write_status"; break; |
| 3770 | case SS_DRAM_OPEN_BANK_MAX: s="open_bank_max"; break; |
| 3771 | case SS_DRAM_PAD_EN_CLK_INV: s="pad_en_clk_inv"; break; |
| 3772 | case SS_DRAM_PERF_COUNT: s="perf_count"; break; |
| 3773 | case SS_DRAM_PERF_CTL: s="perf_ctl"; break; |
| 3774 | case SS_DRAM_PRECHARGE_WAIT: s="precharge_wait"; break; |
| 3775 | case SS_DRAM_PROG_TIME_CNTR: s="prog_time_cntr"; break; |
| 3776 | case SS_DRAM_RANK1_PRESENT: s="rank1_present"; break; |
| 3777 | case SS_DRAM_RAS_ADDR_WIDTH: s="ras_addr_width"; break; |
| 3778 | case SS_DRAM_REFRESH_COUNTER: s="refresh_counter"; break; |
| 3779 | case SS_DRAM_REFRESH_FREQ: s="refresh_freq"; break; |
| 3780 | case SS_DRAM_SCRUB_ENABLE: s="scrub_enable"; break; |
| 3781 | case SS_DRAM_SCRUB_FREQ: s="scrub_freq"; break; |
| 3782 | case SS_DRAM_SEL_LO_ADDR_BITS: s="sel_lo_addr_bits"; break; |
| 3783 | case SS_DRAM_SW_DV_COUNT: s="sw_dv_count"; break; |
| 3784 | case SS_DRAM_TIWTR: s="tiwtr"; break; |
| 3785 | case SS_DRAM_TMRD: s="tmrd"; break; |
| 3786 | case SS_DRAM_TRAS: s="tras"; break; |
| 3787 | case SS_DRAM_TRC: s="trc"; break; |
| 3788 | case SS_DRAM_TRFC: s="trfc"; break; |
| 3789 | case SS_DRAM_TRP: s="trp"; break; |
| 3790 | case SS_DRAM_TRRD: s="trrd"; break; |
| 3791 | case SS_DRAM_TRTP: s="trtp"; break; |
| 3792 | case SS_DRAM_TRTW: s="trtw"; break; |
| 3793 | case SS_DRAM_TWR: s="twr"; break; |
| 3794 | case SS_DRAM_TWTR: s="twtr"; break; |
| 3795 | case SS_DRAM_WAIR_CONTROL: s="wair_control"; break; |
| 3796 | default: s="Illegal DRAM control register"; break; |
| 3797 | } |
| 3798 | |
| 3799 | return s; |
| 3800 | } |
| 3801 | #endif /* } */ |
| 3802 | |
| 3803 | static void ss_ssi_init(config_dev_t * config_devp) |
| 3804 | { |
| 3805 | ss_proc_t * npp; |
| 3806 | |
| 3807 | npp = (ss_proc_t *)config_devp->devp; |
| 3808 | npp->ssip->timeout = 0; |
| 3809 | npp->ssip->log = 0; |
| 3810 | } |
| 3811 | |
| 3812 | |
| 3813 | static void ss_jbi_init(config_dev_t * config_devp) |
| 3814 | { |
| 3815 | ss_proc_t * npp; |
| 3816 | ss_jbi_t * jbip; |
| 3817 | |
| 3818 | npp = (ss_proc_t *)config_devp->devp; |
| 3819 | jbip = npp->jbip; |
| 3820 | |
| 3821 | jbip->config1 = JBI_PORT_LOCN(0x7f) | JBI_PORT_PRES(0x3) | JBI_MID(0x3e); |
| 3822 | jbip->config2 = JBI_IQ_HIGH(0x7); |
| 3823 | jbip->int_mrgn = 0x1515; |
| 3824 | jbip->debug = 0x0; |
| 3825 | jbip->debug_arb = 0x0; |
| 3826 | jbip->perf_ctl = 0x0; |
| 3827 | jbip->perf_cnt = 0x0; |
| 3828 | jbip->err_inject = 0x0; |
| 3829 | jbip->err_config = 0x0; |
| 3830 | jbip->error_log = 0x0; |
| 3831 | jbip->error_ovf = 0x0; |
| 3832 | jbip->log_enb = 0x0; |
| 3833 | jbip->sig_enb = 0x0; |
| 3834 | jbip->log_addr = 0x0; |
| 3835 | jbip->log_ctrl = 0x0; |
| 3836 | jbip->log_data0 = 0x0; |
| 3837 | jbip->log_data1 = 0x0; |
| 3838 | jbip->log_par = 0x0; |
| 3839 | jbip->log_nack = 0x0; |
| 3840 | jbip->log_arb = 0x0; |
| 3841 | jbip->l2_timeout = 0x0; |
| 3842 | jbip->arb_timeout = 0x0; |
| 3843 | jbip->trans_timeout = 0x0; |
| 3844 | jbip->intr_timeout = 0x0; |
| 3845 | jbip->memsize = 0x0; |
| 3846 | } |
| 3847 | |
| 3848 | static void ss_jbus_init(config_dev_t * config_devp) |
| 3849 | { |
| 3850 | ss_proc_t * npp; |
| 3851 | ss_jbus_t * jbusp; |
| 3852 | uint_t i; |
| 3853 | |
| 3854 | npp = (ss_proc_t *)config_devp->devp; |
| 3855 | jbusp = npp->jbusp; |
| 3856 | |
| 3857 | for (i = 0; i < IOB_JBUS_TARGETS; i++) { |
| 3858 | jbusp->j_int_data0[i] = 0x00; |
| 3859 | jbusp->j_int_data1[i] = 0x00; |
| 3860 | jbusp->j_int_busy[i] = 0x00; |
| 3861 | } |
| 3862 | pthread_mutex_init(&jbusp->lock, NULL); |
| 3863 | } |
| 3864 | |
| 3865 | static void ss_iob_init(config_dev_t * config_devp) |
| 3866 | { |
| 3867 | ss_proc_t * npp; |
| 3868 | ss_iob_t * iobp; |
| 3869 | uint64_t avail, cores, device; |
| 3870 | |
| 3871 | npp = (ss_proc_t *)config_devp->devp; |
| 3872 | iobp = npp->iobp; |
| 3873 | |
| 3874 | pthread_mutex_init(&iobp->iob_lock, NULL); |
| 3875 | |
| 3876 | /* IOB Interrupt Registers section 7.3 of PRM 1.2 */ |
| 3877 | for (device=0; device<IOB_DEV_MAX; device++) { |
| 3878 | iobp->int_man[device] = 0x0000; |
| 3879 | iobp->int_ctl[device] = IOB_INT_CTL_MASK; |
| 3880 | } |
| 3881 | iobp->int_vec_dis = 0x0000; |
| 3882 | iobp->j_int_vec = 0x0000; |
| 3883 | pthread_mutex_init(&iobp->int_vec_lock, NULL); /* FIXME: to go away ! */ |
| 3884 | |
| 3885 | /* Reset Status Register section 11.2 of PRM 1.2 */ |
| 3886 | iobp->rset_stat = 0x0004; /* POR bit */ |
| 3887 | |
| 3888 | /* CPU throttle control section 16.1 of PRM 1.2 */ |
| 3889 | iobp->tm_stat_ctl = 0x0000; |
| 3890 | |
| 3891 | /* EFUSE Registers section 18.8 of PRM 1.2 */ |
| 3892 | iobp->proc_ser_num = 0x0000; |
| 3893 | iobp->iob_fuse = 0x0000; |
| 3894 | |
| 3895 | /* Internal Margin Register section 19.1.3 of PRM 1.2 */ |
| 3896 | iobp->int_mrgn_reg = 0x0000; |
| 3897 | |
| 3898 | /* IOB Visibility Port Support section 19.2 of PRM 1.2 */ |
| 3899 | iobp->l2_vis_control = 0x0000; |
| 3900 | iobp->l2_vis_mask_a = 0x0000; |
| 3901 | iobp->l2_vis_mask_b = 0x0000; |
| 3902 | iobp->l2_vis_compare_a = 0x0000; |
| 3903 | iobp->l2_vis_compare_b = 0x0000; |
| 3904 | iobp->l2_trig_delay = 0x0000; |
| 3905 | iobp->iob_vis_select = 0x0000; |
| 3906 | iobp->db_enet_control = 0x0000; |
| 3907 | iobp->db_enet_idleval = 0x0000; |
| 3908 | iobp->db_jbus_control = 0x0000; |
| 3909 | iobp->db_jbus_mask0 = 0x0000; |
| 3910 | iobp->db_jbus_mask1 = 0x0000; |
| 3911 | iobp->db_jbus_mask2 = 0x0000; |
| 3912 | iobp->db_jbus_mask3 = 0x0000; |
| 3913 | iobp->db_jbus_compare0 = 0x0000; |
| 3914 | iobp->db_jbus_compare1 = 0x0000; |
| 3915 | iobp->db_jbus_compare2 = 0x0000; |
| 3916 | iobp->db_jbus_compare3 = 0x0000; |
| 3917 | iobp->db_jbus_count = 0x0000; |
| 3918 | } |
| 3919 | |
| 3920 | static void ss_clock_init(config_dev_t * config_devp) |
| 3921 | { |
| 3922 | ss_proc_t * npp; |
| 3923 | ss_clock_t * clockp; |
| 3924 | |
| 3925 | npp = (ss_proc_t *)config_devp->devp; |
| 3926 | clockp = npp->clockp; |
| 3927 | |
| 3928 | /* Clock Unit section 11.1 of PRM 1.2 */ |
| 3929 | clockp->divider = 0x80200200101004; |
| 3930 | clockp->control = 0x0000; |
| 3931 | clockp->dll_control = 0x0000; |
| 3932 | clockp->dll_bypass = 0x0000; |
| 3933 | clockp->jbus_sync = 0x0000; |
| 3934 | clockp->dram_sync = 0x0000; |
| 3935 | clockp->version = 0x0000; |
| 3936 | |
| 3937 | } |
| 3938 | |
| 3939 | static void ss_l2_ctl_init(config_dev_t * config_devp) |
| 3940 | { |
| 3941 | uint_t bank, idx; |
| 3942 | ss_proc_t * npp; |
| 3943 | ss_l2_cache_t * l2p; |
| 3944 | |
| 3945 | npp = (ss_proc_t *)config_devp->devp; |
| 3946 | l2p = npp->l2p; |
| 3947 | |
| 3948 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 3949 | l2p->control[bank] = L2_DIS; |
| 3950 | l2p->bist_ctl[bank] = 0x0; |
| 3951 | l2p->error_enable[bank] = 0x0; |
| 3952 | l2p->error_status[bank] = 0x0; |
| 3953 | l2p->error_address[bank] = 0x0; |
| 3954 | l2p->error_inject[bank] = 0x0; |
| 3955 | } |
| 3956 | |
| 3957 | l2p->diag_datap = Xmalloc(L2_DATA_SIZE); |
| 3958 | l2p->diag_tagp = Xmalloc(L2_TAG_SIZE); |
| 3959 | l2p->diag_vuadp = Xmalloc(L2_VUAD_SIZE); |
| 3960 | |
| 3961 | for (idx=0; idx<L2_DATA_SIZE/8; idx++) { |
| 3962 | l2p->diag_datap[idx] = 0xdeadbeef; |
| 3963 | } |
| 3964 | |
| 3965 | for (idx=0; idx<L2_TAG_SIZE/8; idx++) { |
| 3966 | l2p->diag_tagp[idx] = 0xdeadbeef; |
| 3967 | } |
| 3968 | |
| 3969 | for (idx=0; idx<L2_VUAD_SIZE/8; idx++) { |
| 3970 | l2p->diag_vuadp[idx] = 0xdeadbeef; |
| 3971 | } |
| 3972 | } |
| 3973 | |
| 3974 | static void ss_dram_ctl_init(config_dev_t * config_devp) |
| 3975 | { |
| 3976 | uint_t bidx; |
| 3977 | ss_proc_t * npp; |
| 3978 | ss_dram_bank_t * dbp; |
| 3979 | |
| 3980 | npp = (ss_proc_t *)config_devp->devp; |
| 3981 | |
| 3982 | for (bidx=0; bidx<npp->num_mbanks; bidx++) { |
| 3983 | /* DRAM controller section 15.5 of PRM 1.2 */ |
| 3984 | dbp = &(npp->mbankp[bidx]); |
| 3985 | dbp->cas_addr_width = 0xb ; |
| 3986 | dbp->ras_addr_width = 0xf ; |
| 3987 | dbp->cas_lat = 0x3 ; |
| 3988 | dbp->scrub_freq = 0xfff ; |
| 3989 | dbp->refresh_freq = 0x514 ; |
| 3990 | dbp->refresh_counter = 0x0 ; |
| 3991 | dbp->scrub_enable = 0x0 ; |
| 3992 | dbp->trrd = 0x2 ; |
| 3993 | dbp->trc = 0xc ; |
| 3994 | dbp->dram_trcd = 0x3 ; |
| 3995 | dbp->twtr = 0x0 ; |
| 3996 | dbp->trtw = 0x0 ; |
| 3997 | dbp->trtp = 0x2 ; |
| 3998 | dbp->tras = 0x9 ; |
| 3999 | dbp->trp = 0x3 ; |
| 4000 | dbp->twr = 0x3 ; |
| 4001 | dbp->trfc = 0x27 ; |
| 4002 | dbp->tmrd = 0x2 ; |
| 4003 | dbp->tiwtr = 0x2 ; |
| 4004 | dbp->precharge_wait = 0x55 ; |
| 4005 | dbp->dimm_stack = 0x0 ; |
| 4006 | dbp->ext_wr_mode2 = 0x0 ; |
| 4007 | dbp->ext_wr_mode1 = 0x400 ; |
| 4008 | dbp->ext_wr_mode3 = 0x0 ; |
| 4009 | dbp->wair_control = 0x1 ; |
| 4010 | dbp->rank1_present = 0x0 ; |
| 4011 | dbp->channel_disabled = 0x0 ; |
| 4012 | dbp->sel_lo_addr_bits = 0x0 ; |
| 4013 | dbp->dimm_init = 0x0 ; |
| 4014 | dbp->sw_dv_count = 0x1 ; |
| 4015 | dbp->hw_dmux_clk_inv = 0x0 ; |
| 4016 | dbp->pad_en_clk_inv = 0x3<<2 ; |
| 4017 | dbp->mode_write_status = 0x0 ; |
| 4018 | dbp->init_status = 0x0 ; |
| 4019 | dbp->dimm_present = 0x3 ; |
| 4020 | dbp->failover_status = 0x0 ; |
| 4021 | dbp->failover_mask = 0x0 ; |
| 4022 | |
| 4023 | /* Performance counter section 10.3 of PRM 1.1 */ |
| 4024 | dbp->perf_ctl = 0x0 ; |
| 4025 | dbp->perf_count = 0x0 ; |
| 4026 | |
| 4027 | /* Error handling section 12.9 of PRM 1.1 */ |
| 4028 | dbp->error_status = 0x0 ; /* FIXME: only bits 56-16 reset on POR .. everything else to be preserved */ |
| 4029 | dbp->error_address = 0x0 ; /* FIXME: bits 39-4 to be preserved accross POR */ |
| 4030 | dbp->error_inject = 0x0 ; |
| 4031 | dbp->error_counter = 0x0 ; /* FIXME: bits 17-0 preserved accross reset */ |
| 4032 | dbp->error_location = 0x0 ; /* FIXME: bits 35-0 preserved accross reset */ |
| 4033 | |
| 4034 | /* Power management section 16.2 of PRM 1.1 */ |
| 4035 | dbp->open_bank_max = 0x1ffff ; |
| 4036 | dbp->prog_time_cntr = 0xffff ; |
| 4037 | |
| 4038 | dbp->dbg_trg_en = (0x1<<7) | (0x1) ; /* Hardware debug section 19.1 of PRM 1.1 */ |
| 4039 | } |
| 4040 | } |
| 4041 | |
| 4042 | |
| 4043 | static bool_t ss_ssi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 4044 | { |
| 4045 | uint_t reg; |
| 4046 | uint64_t val; |
| 4047 | ss_proc_t *npp; |
| 4048 | ss_ssi_t * ssip; |
| 4049 | |
| 4050 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 4051 | |
| 4052 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 4053 | ssip = npp->ssip; |
| 4054 | |
| 4055 | reg = off & 0x1ffff; |
| 4056 | |
| 4057 | switch (op) { |
| 4058 | case MA_st64: |
| 4059 | val = *regp; |
| 4060 | |
| 4061 | switch (reg) { |
| 4062 | case NI_SSI_TIMEOUT: |
| 4063 | if (0LL != (val & ~(MASK64(24,0)))) goto write_reserved; |
| 4064 | ssip->timeout &= ~(MASK64(24,0)); |
| 4065 | ssip->timeout |= val; |
| 4066 | break; |
| 4067 | case NI_SSI_LOG: |
| 4068 | if (0LL != (val & ~(MASK64(1,0)))) goto write_reserved; |
| 4069 | ssip->timeout &= ~val; |
| 4070 | break; |
| 4071 | default: |
| 4072 | /* illegal reg - an error */ |
| 4073 | return false; |
| 4074 | } |
| 4075 | break; |
| 4076 | |
| 4077 | write_reserved: |
| 4078 | EXEC_WARNING( ("Attempted write to reserved field in ssi:" |
| 4079 | "Write 0x%llx to register %s (offset 0x%x)", |
| 4080 | val, ss_ssi_reg_name(reg), reg ) ); |
| 4081 | return false; |
| 4082 | |
| 4083 | case MA_ldu64: |
| 4084 | switch (reg) { |
| 4085 | case NI_SSI_TIMEOUT: |
| 4086 | val = ssip->timeout & MASK64(24,0); |
| 4087 | break; |
| 4088 | case NI_SSI_LOG: |
| 4089 | val = ssip->log & MASK64(1,0); |
| 4090 | break; |
| 4091 | default: |
| 4092 | /* illegal reg - an error */ |
| 4093 | return false; |
| 4094 | } |
| 4095 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 4096 | *regp = val; |
| 4097 | break; |
| 4098 | |
| 4099 | default: |
| 4100 | ASSERT(0); |
| 4101 | } |
| 4102 | |
| 4103 | return true; |
| 4104 | } |
| 4105 | |
| 4106 | |
| 4107 | static bool_t ss_jbi_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 4108 | { |
| 4109 | uint_t reg; |
| 4110 | uint64_t val; |
| 4111 | ss_proc_t *npp; |
| 4112 | ss_jbi_t * jbip; |
| 4113 | |
| 4114 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 4115 | |
| 4116 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 4117 | jbip = npp->jbip; |
| 4118 | |
| 4119 | if (npp->rust_jbi_stores && |
| 4120 | op == MA_st64 && (off & 0x7000000) == 0x7000000) |
| 4121 | return true; |
| 4122 | |
| 4123 | reg = off & 0xfffff; |
| 4124 | |
| 4125 | switch (op) { |
| 4126 | case MA_st64: |
| 4127 | val = *regp; |
| 4128 | |
| 4129 | /* FIXME!! ignore write to reserved bits for BRINGUP ONLY */ |
| 4130 | #define ASSIGN_JBI(_n, _m) do { \ |
| 4131 | jbip->_n &= ~(_m); \ |
| 4132 | jbip->_n |= (val & (_m)); \ |
| 4133 | } while (0) |
| 4134 | |
| 4135 | #define ASSIGN_W1C_JBI(_n, _m) do { \ |
| 4136 | jbip->_n &= (~val | ~(_m)); \ |
| 4137 | } while (0) |
| 4138 | |
| 4139 | switch (reg) { |
| 4140 | /* JBUS Interface section 14.1 of PRM 1.4 */ |
| 4141 | case NI_JBI_CONFIG1: |
| 4142 | ASSIGN_JBI( config1, MASK64(50,44)|MASK64(39,38)| |
| 4143 | MASK64(31,22)|MASK64(1,0) ); |
| 4144 | break; |
| 4145 | case NI_JBI_CONFIG2: |
| 4146 | ASSIGN_JBI( config2, MASK64(30,28)|MASK64(26,24)| |
| 4147 | MASK64(21,20)|MASK64(17,8)|MASK64(3,0) ); |
| 4148 | break; |
| 4149 | case NI_JBI_INT_MRGN: |
| 4150 | ASSIGN_JBI( int_mrgn, MASK64(12,8)|MASK64(4,0) ); |
| 4151 | break; |
| 4152 | case NI_JBI_DEBUG: |
| 4153 | ASSIGN_JBI( debug, MASK64(0,0) ); |
| 4154 | break; |
| 4155 | case NI_JBI_DEBUG_ARB: |
| 4156 | ASSIGN_JBI( debug_arb, MASK64(24,24)|MASK64(22,18)| |
| 4157 | MASK64(16,0) ); |
| 4158 | break; |
| 4159 | case NI_JBI_PERF_CTL: |
| 4160 | ASSIGN_JBI( perf_ctl, MASK64(7,0) ); |
| 4161 | break; |
| 4162 | case NI_JBI_PERF_CNT: |
| 4163 | ASSIGN_JBI( perf_cnt, MASK64(63,0) ); |
| 4164 | break; |
| 4165 | case NI_JBI_ERR_INJECT: |
| 4166 | ASSIGN_JBI( err_inject, MASK64(30,0) ); |
| 4167 | break; |
| 4168 | case NI_JBI_ERR_CONFIG: |
| 4169 | ASSIGN_JBI( err_config, MASK64(4,2) ); |
| 4170 | break; |
| 4171 | case NI_JBI_ERROR_LOG: |
| 4172 | ASSIGN_W1C_JBI( error_log, MASK64(28,24)|MASK64(17,8)| |
| 4173 | MASK64(5,4)|MASK64(2,0) ); |
| 4174 | break; |
| 4175 | case NI_JBI_ERROR_OVF: |
| 4176 | ASSIGN_W1C_JBI( error_ovf, MASK64(28,24)|MASK64(17,8)| |
| 4177 | MASK64(5,4)|MASK64(2,0) ); |
| 4178 | break; |
| 4179 | case NI_JBI_LOG_ENB: |
| 4180 | ASSIGN_JBI( log_enb, MASK64(28,24)|MASK64(17,8)| |
| 4181 | MASK64(5,4)|MASK64(2,0) ); |
| 4182 | break; |
| 4183 | case NI_JBI_SIG_ENB: |
| 4184 | ASSIGN_JBI( sig_enb, MASK64(28,24)|MASK64(17,8)| |
| 4185 | MASK64(5,4)|MASK64(2,0) ); |
| 4186 | break; |
| 4187 | case NI_JBI_LOG_ADDR: |
| 4188 | case NI_JBI_LOG_CTRL: |
| 4189 | case NI_JBI_LOG_DATA0: |
| 4190 | case NI_JBI_LOG_DATA1: |
| 4191 | case NI_JBI_LOG_PAR: |
| 4192 | case NI_JBI_LOG_ARB: |
| 4193 | goto write_reserved; |
| 4194 | case NI_JBI_LOG_NACK: |
| 4195 | ASSIGN_W1C_JBI( log_nack, MASK64(31,0) ); |
| 4196 | break; |
| 4197 | case NI_JBI_L2_TIMEOUT: |
| 4198 | ASSIGN_JBI( l2_timeout, MASK64(31,0) ); |
| 4199 | break; |
| 4200 | case NI_JBI_ARB_TIMEOUT: |
| 4201 | ASSIGN_JBI( arb_timeout, MASK64(31,0) ); |
| 4202 | break; |
| 4203 | case NI_JBI_TRANS_TIMEOUT: |
| 4204 | ASSIGN_JBI( trans_timeout, MASK64(31,0) ); |
| 4205 | break; |
| 4206 | case NI_JBI_INTR_TIMEOUT: |
| 4207 | ASSIGN_JBI( intr_timeout, MASK64(31,0) ); |
| 4208 | break; |
| 4209 | case NI_JBI_MEMSIZE: |
| 4210 | ASSIGN_JBI( memsize, MASK64(37,30) ); |
| 4211 | break; |
| 4212 | |
| 4213 | default: |
| 4214 | /* illegal reg - an error */ |
| 4215 | return false; |
| 4216 | } |
| 4217 | break; |
| 4218 | |
| 4219 | write_reserved: |
| 4220 | EXEC_WARNING( ("Attempted write to reserved field in jbi:" |
| 4221 | "Write 0x%llx to register %s (offset 0x%x)", |
| 4222 | val, ss_jbi_reg_name(reg), reg ) ); |
| 4223 | return false; |
| 4224 | |
| 4225 | case MA_ldu64: |
| 4226 | #define RETRIEVE_JBI(_n, _m) do { val = ((jbip->_n) & (_m)); } while (0) |
| 4227 | switch (reg) { |
| 4228 | /* JBUS Interface section 14.1 of PRM 1.4 */ |
| 4229 | case NI_JBI_CONFIG1: |
| 4230 | RETRIEVE_JBI( config1, MASK64(63,0) ); |
| 4231 | break; |
| 4232 | case NI_JBI_CONFIG2: |
| 4233 | RETRIEVE_JBI( config2, MASK64(63,0) ); |
| 4234 | break; |
| 4235 | case NI_JBI_INT_MRGN: |
| 4236 | RETRIEVE_JBI( int_mrgn, MASK64(12,8)|MASK64(4,0) ); |
| 4237 | break; |
| 4238 | case NI_JBI_DEBUG: |
| 4239 | RETRIEVE_JBI( debug, MASK64(63,0) ); |
| 4240 | break; |
| 4241 | case NI_JBI_DEBUG_ARB: |
| 4242 | RETRIEVE_JBI( debug_arb, MASK64(63,0) ); |
| 4243 | case NI_JBI_PERF_CTL: |
| 4244 | RETRIEVE_JBI( perf_ctl, MASK64(7,0) ); |
| 4245 | break; |
| 4246 | case NI_JBI_PERF_CNT: |
| 4247 | RETRIEVE_JBI( perf_cnt, MASK64(63,0) ); |
| 4248 | break; |
| 4249 | case NI_JBI_ERR_INJECT: |
| 4250 | RETRIEVE_JBI( err_inject, MASK64(30,0) ); |
| 4251 | break; |
| 4252 | /* JBI Error Registers section 12.12.2 of PRM 1.4 */ |
| 4253 | case NI_JBI_ERR_CONFIG: |
| 4254 | RETRIEVE_JBI( err_config, MASK64(4,2) ); |
| 4255 | break; |
| 4256 | case NI_JBI_ERROR_LOG: |
| 4257 | RETRIEVE_JBI( error_log, MASK64(28,24)|MASK64(17,8)| |
| 4258 | MASK64(5,4)|MASK64(2,0) ); |
| 4259 | break; |
| 4260 | case NI_JBI_ERROR_OVF: |
| 4261 | RETRIEVE_JBI( error_ovf, MASK64(28,24)|MASK64(17,8)| |
| 4262 | MASK64(5,4)|MASK64(2,0) ); |
| 4263 | break; |
| 4264 | case NI_JBI_LOG_ENB: |
| 4265 | RETRIEVE_JBI( log_enb, MASK64(28,24)|MASK64(17,8)| |
| 4266 | MASK64(5,4)|MASK64(2,0) ); |
| 4267 | break; |
| 4268 | case NI_JBI_SIG_ENB: |
| 4269 | RETRIEVE_JBI( sig_enb, MASK64(28,24)|MASK64(17,8)| |
| 4270 | MASK64(5,4)|MASK64(2,0) ); |
| 4271 | break; |
| 4272 | case NI_JBI_LOG_ADDR: |
| 4273 | RETRIEVE_JBI( log_addr, MASK64(63,0) ); |
| 4274 | break; |
| 4275 | case NI_JBI_LOG_CTRL: |
| 4276 | RETRIEVE_JBI( log_ctrl, MASK64(63,0) ); |
| 4277 | break; |
| 4278 | case NI_JBI_LOG_DATA0: |
| 4279 | RETRIEVE_JBI( log_data0, MASK64(63,0) ); |
| 4280 | break; |
| 4281 | case NI_JBI_LOG_DATA1: |
| 4282 | RETRIEVE_JBI( log_data1, MASK64(63,0) ); |
| 4283 | break; |
| 4284 | case NI_JBI_LOG_PAR: |
| 4285 | RETRIEVE_JBI( log_par, MASK64(32,32)|MASK64(25,20)| |
| 4286 | MASK64(13,8)|MASK64(6,0) ); |
| 4287 | break; |
| 4288 | case NI_JBI_LOG_NACK: |
| 4289 | RETRIEVE_JBI( log_nack, MASK64(31,0) ); |
| 4290 | break; |
| 4291 | case NI_JBI_LOG_ARB: |
| 4292 | RETRIEVE_JBI( log_arb, MASK64(34,32)|MASK64(26,24)| |
| 4293 | MASK64(22,16)|MASK64(14,8)|MASK64(6,0) ); |
| 4294 | break; |
| 4295 | case NI_JBI_L2_TIMEOUT: |
| 4296 | RETRIEVE_JBI( l2_timeout, MASK64(31,0) ); |
| 4297 | break; |
| 4298 | case NI_JBI_ARB_TIMEOUT: |
| 4299 | RETRIEVE_JBI( arb_timeout, MASK64(31,0) ); |
| 4300 | break; |
| 4301 | case NI_JBI_TRANS_TIMEOUT: |
| 4302 | RETRIEVE_JBI( trans_timeout, MASK64(31,0) ); |
| 4303 | break; |
| 4304 | case NI_JBI_INTR_TIMEOUT: |
| 4305 | RETRIEVE_JBI( intr_timeout, MASK64(31,0) ); |
| 4306 | break; |
| 4307 | case NI_JBI_MEMSIZE: |
| 4308 | RETRIEVE_JBI( memsize, MASK64(37,30) ); |
| 4309 | break; |
| 4310 | |
| 4311 | default: |
| 4312 | /* illegal reg - an error */ |
| 4313 | return false; |
| 4314 | } |
| 4315 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 4316 | *regp = val; |
| 4317 | break; |
| 4318 | |
| 4319 | default: |
| 4320 | ASSERT(0); |
| 4321 | } |
| 4322 | |
| 4323 | return true; |
| 4324 | } |
| 4325 | |
| 4326 | static bool_t ss_jbus_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 4327 | { |
| 4328 | uint_t reg, target; |
| 4329 | uint64_t val; |
| 4330 | ss_proc_t *npp; |
| 4331 | ss_strand_t *nsp; |
| 4332 | ss_jbus_t *jbusp; |
| 4333 | sparcv9_cpu_t *v9p; |
| 4334 | |
| 4335 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 4336 | |
| 4337 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 4338 | nsp = v9p->impl_specificp; |
| 4339 | v9p = sp->specificp; |
| 4340 | jbusp = npp->jbusp; |
| 4341 | |
| 4342 | reg = off & 0xffff; |
| 4343 | |
| 4344 | switch (op) { |
| 4345 | case MA_st64: |
| 4346 | val = *regp; |
| 4347 | |
| 4348 | #define ASSIGN_JBUS(_n, _m) do { \ |
| 4349 | if (0LL != (val & ~(_m))) goto write_reserved; \ |
| 4350 | jbusp->_n = val; \ |
| 4351 | } while (0) |
| 4352 | |
| 4353 | if (reg >= NI_J_INT_BUSY) { |
| 4354 | reg = reg & 0xf00; /* for debug output */ |
| 4355 | if (reg < NI_J_INT_ABUSY) { |
| 4356 | ASSERT(reg == 0x900); |
| 4357 | target = (off >> 3) & (IOB_JBUS_TARGETS-1); |
| 4358 | ASSIGN_JBUS( j_int_busy[target], MASK64(5,5) ); |
| 4359 | } else { |
| 4360 | /* aliased to target thread's register */ |
| 4361 | ASSERT(reg == 0xb00); |
| 4362 | target = nsp->vcore_id; |
| 4363 | ASSIGN_JBUS( j_int_busy[target], MASK64(5,5) ); |
| 4364 | } |
| 4365 | return true; |
| 4366 | } |
| 4367 | |
| 4368 | switch (reg) { |
| 4369 | case NI_J_INT_DATA0: |
| 4370 | case NI_J_INT_DATA1: |
| 4371 | case NI_J_INT_ADATA0: |
| 4372 | case NI_J_INT_ADATA1: |
| 4373 | goto write_reserved; |
| 4374 | default: |
| 4375 | return false; /* illegal reg - an error */ |
| 4376 | } |
| 4377 | |
| 4378 | write_reserved: |
| 4379 | EXEC_WARNING( ("Attempted write to reserved field in JBUS:" |
| 4380 | "Write 0x%llx to register %s (offset 0x%x)", |
| 4381 | val, ss_jbus_reg_name(reg), reg ) ); |
| 4382 | return false; |
| 4383 | |
| 4384 | case MA_ldu64: |
| 4385 | reg = reg & 0xf00; /* for debug output */ |
| 4386 | |
| 4387 | switch (reg) { |
| 4388 | case NI_J_INT_DATA0: |
| 4389 | target = (off >> 3) & (IOB_JBUS_TARGETS-1); |
| 4390 | val = jbusp->j_int_data0[target]; |
| 4391 | break; |
| 4392 | case NI_J_INT_DATA1: |
| 4393 | target = (off >> 3) & (IOB_JBUS_TARGETS-1); |
| 4394 | val = jbusp->j_int_data1[target]; |
| 4395 | break; |
| 4396 | case NI_J_INT_ADATA0: |
| 4397 | target = nsp->vcore_id; |
| 4398 | val = jbusp->j_int_data0[target]; |
| 4399 | break; |
| 4400 | case NI_J_INT_ADATA1: |
| 4401 | target = nsp->vcore_id; |
| 4402 | val = jbusp->j_int_data1[target]; |
| 4403 | break; |
| 4404 | case NI_J_INT_BUSY: |
| 4405 | target = (off >> 3) & (IOB_JBUS_TARGETS-1); |
| 4406 | val = jbusp->j_int_busy[target]; |
| 4407 | break; |
| 4408 | case NI_J_INT_ABUSY: |
| 4409 | target = nsp->vcore_id; |
| 4410 | val = jbusp->j_int_busy[target]; |
| 4411 | break; |
| 4412 | default: |
| 4413 | return false; /* illegal reg - an error */ |
| 4414 | } |
| 4415 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 4416 | *regp = val; |
| 4417 | break; |
| 4418 | |
| 4419 | default: |
| 4420 | ASSERT(0); |
| 4421 | } |
| 4422 | |
| 4423 | return true; |
| 4424 | } |
| 4425 | |
| 4426 | static bool_t ss_iob_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 4427 | { |
| 4428 | uint_t reg; |
| 4429 | uint64_t val; |
| 4430 | ss_proc_t *npp; |
| 4431 | ss_iob_t * iobp; |
| 4432 | uint_t device; |
| 4433 | |
| 4434 | |
| 4435 | |
| 4436 | /* |
| 4437 | * FIXME: For the moment we only support 64bit accesses to registers. |
| 4438 | * we need to do better than this, but confirm partial access behaviour |
| 4439 | * with the Niagara team. |
| 4440 | */ |
| 4441 | |
| 4442 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 4443 | if (off & 7) return false; /* FIXME: 64bit access support only for the moment */ |
| 4444 | |
| 4445 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 4446 | iobp = npp->iobp; |
| 4447 | device = off >> 3; |
| 4448 | reg = off & 0xffff; |
| 4449 | pthread_mutex_lock( &iobp->iob_lock ); |
| 4450 | |
| 4451 | switch (op) { |
| 4452 | case MA_st64: |
| 4453 | val = *regp; |
| 4454 | |
| 4455 | #define ASSIGN_IOB(_n, _m) do { \ |
| 4456 | iobp->_n = (val & (_m)); \ |
| 4457 | if (0LL != (val & ~(_m))) goto write_reserved; \ |
| 4458 | } while (0) |
| 4459 | |
| 4460 | DBGSSI( lprintf(sp->gid, "store to iob reg: 0x%x (%s) value 0x%LLx\n", reg, ss_iob_reg_name(reg), val);); |
| 4461 | |
| 4462 | /* IOB Interrupt Registers section 7.3 of PRM 1.2 */ |
| 4463 | switch (reg) { |
| 4464 | case NI_INT_MAN0: /* internal */ |
| 4465 | case NI_INT_MAN1: /* errors */ |
| 4466 | case NI_INT_MAN2: /* SSI */ |
| 4467 | case NI_INT_MAN3: /* reserved */ |
| 4468 | |
| 4469 | ASSIGN_IOB( int_man[device], MASK64(12,8)|MASK64(5,0) ); |
| 4470 | break; |
| 4471 | |
| 4472 | case NI_INT_CTL0: /* internal */ |
| 4473 | case NI_INT_CTL1: /* errors */ |
| 4474 | case NI_INT_CTL2: /* SSI */ |
| 4475 | case NI_INT_CTL3: /* reserved */ |
| 4476 | |
| 4477 | device = (off >> 3) & (IOB_DEV_MAX-1); |
| 4478 | if (0LL != (val & ~(MASK64(2,1)))) goto write_reserved; |
| 4479 | |
| 4480 | do { |
| 4481 | uint8_t *int_ctl; |
| 4482 | |
| 4483 | int_ctl = &iobp->int_ctl[device]; |
| 4484 | |
| 4485 | *int_ctl = (val & IOB_INT_CTL_MASK) | (*int_ctl & ~IOB_INT_CTL_MASK); |
| 4486 | if (val & IOB_INT_CTL_CLEAR) { |
| 4487 | *int_ctl &= ~IOB_INT_CTL_PEND; |
| 4488 | } |
| 4489 | |
| 4490 | /* |
| 4491 | * OK PRM 1.4 S 7.2.4 indicates that if mask is cleared, and pending |
| 4492 | * is still set then an interrupt is delivered ... i.e. int_vec is set (again). |
| 4493 | */ |
| 4494 | |
| 4495 | if (((*int_ctl) & IOB_INT_CTL_PEND) && !((*int_ctl) & IOB_INT_CTL_MASK)) { |
| 4496 | *int_ctl &= ~IOB_INT_CTL_PEND; |
| 4497 | if (device == IOB_DEV_SSI) { |
| 4498 | pthread_mutex_unlock( &iobp->iob_lock ); |
| 4499 | npp->config_procp->proc_typep->ext_signal(npp->config_procp, ES_SSI, NULL); |
| 4500 | return true; |
| 4501 | } |
| 4502 | } |
| 4503 | } while (0); |
| 4504 | break; |
| 4505 | |
| 4506 | case NI_INT_VEC_DIS: |
| 4507 | if (IOB_INT_VEC_RESUME(val)) { |
| 4508 | if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0)))) |
| 4509 | goto write_reserved; |
| 4510 | pthread_mutex_lock(&iobp->int_vec_lock); |
| 4511 | iobp->int_vec_dis = val; |
| 4512 | npp->config_procp->proc_typep->ext_signal( |
| 4513 | npp->config_procp, ES_RESUME, NULL); |
| 4514 | pthread_mutex_unlock(&iobp->int_vec_lock); |
| 4515 | |
| 4516 | } else if (IOB_INT_VEC_IDLE(val)) { |
| 4517 | simcpu_t *sp; |
| 4518 | |
| 4519 | if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0)))) |
| 4520 | goto write_reserved; |
| 4521 | pthread_mutex_lock(&iobp->int_vec_lock); |
| 4522 | iobp->int_vec_dis = val; |
| 4523 | npp->config_procp->proc_typep->ext_signal( |
| 4524 | npp->config_procp, ES_IDLE, NULL); |
| 4525 | pthread_mutex_unlock(&iobp->int_vec_lock); |
| 4526 | } else |
| 4527 | if (IOB_INT_VEC_RESET(val)) { |
| 4528 | uint_t tidx; |
| 4529 | |
| 4530 | if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0)))) |
| 4531 | goto write_reserved; |
| 4532 | pthread_mutex_lock(&iobp->int_vec_lock); |
| 4533 | iobp->int_vec_dis = val; |
| 4534 | npp->config_procp->proc_typep->ext_signal( |
| 4535 | npp->config_procp, ES_RESET, NULL); |
| 4536 | pthread_mutex_unlock(&iobp->int_vec_lock); |
| 4537 | } else |
| 4538 | if (IOB_INT_VEC_INTR(val)) { |
| 4539 | uint_t tidx; |
| 4540 | |
| 4541 | if (0LL!=(val&~(MASK64(17,16)|MASK64(12,8)|MASK64(5,0)))) |
| 4542 | goto write_reserved; |
| 4543 | niagara_send_xirq(sp, val); |
| 4544 | } |
| 4545 | break; |
| 4546 | case NI_J_INT_VEC: |
| 4547 | ASSIGN_IOB( j_int_vec, MASK64(5,0) ); |
| 4548 | break; |
| 4549 | case NI_RSET_STAT: |
| 4550 | ASSIGN_IOB( rset_stat, MASK64(3,1) ); |
| 4551 | break; |
| 4552 | case NI_TM_STAT_CTL: |
| 4553 | ASSIGN_IOB( tm_stat_ctl,MASK64(63,63)|MASK64(31,0) ); |
| 4554 | break; |
| 4555 | |
| 4556 | case NI_PROC_SER_NUM: |
| 4557 | case NI_CORE_AVAIL: |
| 4558 | case NI_IOB_FUSE: |
| 4559 | EXEC_WARNING( ("Attempted write to read only register in IOB:" |
| 4560 | "Write 0x%llx to register %s (offset 0x%x)", |
| 4561 | val, ss_iob_reg_name(reg), reg ) ); |
| 4562 | goto access_failed; /* RO regs */ |
| 4563 | |
| 4564 | case NI_INT_MRGN_REG: |
| 4565 | ASSIGN_IOB( int_mrgn_reg, MASK64(4,0) ); |
| 4566 | break; |
| 4567 | case NI_L2_VIS_CONTROL: |
| 4568 | ASSIGN_IOB( l2_vis_control, MASK64(3,2) ); |
| 4569 | break; |
| 4570 | case NI_L2_VIS_MASK_A: |
| 4571 | ASSIGN_IOB( l2_vis_mask_a, MASK64(51,48)|MASK64(44,40)| |
| 4572 | MASK64(33,8)|MASK64(5,2) ); |
| 4573 | break; |
| 4574 | case NI_L2_VIS_MASK_B: |
| 4575 | ASSIGN_IOB( l2_vis_mask_b, MASK64(51,48)|MASK64(44,40)| |
| 4576 | MASK64(33,8)|MASK64(5,2) ); |
| 4577 | break; |
| 4578 | case NI_L2_VIS_COMPARE_A: |
| 4579 | ASSIGN_IOB( l2_vis_compare_a, MASK64(51,48)|MASK64(44,40)| |
| 4580 | MASK64(33,8)|MASK64(5,2) ); |
| 4581 | break; |
| 4582 | case NI_L2_VIS_COMPARE_B: |
| 4583 | ASSIGN_IOB( l2_vis_compare_b, MASK64(51,48)|MASK64(44,40)| |
| 4584 | MASK64(33,8)|MASK64(5,2) ); |
| 4585 | break; |
| 4586 | case NI_L2_TRIG_DELAY: |
| 4587 | ASSIGN_IOB( l2_trig_delay, MASK64(31,0) ); |
| 4588 | break; |
| 4589 | case NI_IOB_VIS_SELECT: |
| 4590 | ASSIGN_IOB( iob_vis_select, MASK64(3,0) ); |
| 4591 | break; |
| 4592 | case NI_DB_ENET_CONTROL: |
| 4593 | ASSIGN_IOB( db_enet_control, MASK64(8,8)|MASK64(6,5)| |
| 4594 | MASK64(3,0) ); |
| 4595 | break; |
| 4596 | case NI_DB_ENET_IDLEVAL: |
| 4597 | ASSIGN_IOB( db_enet_idleval, MASK64(39,0) ); |
| 4598 | break; |
| 4599 | case NI_DB_JBUS_CONTROL: |
| 4600 | ASSIGN_IOB( db_jbus_control, MASK64(16,16)|MASK64(6,4)| |
| 4601 | MASK64(2,0) ); |
| 4602 | break; |
| 4603 | case NI_DB_JBUS_MASK0: |
| 4604 | ASSIGN_IOB( db_jbus_mask0, MASK64(45,0) ); |
| 4605 | break; |
| 4606 | case NI_DB_JBUS_MASK1: |
| 4607 | ASSIGN_IOB( db_jbus_mask1, MASK64(45,0) ); |
| 4608 | break; |
| 4609 | case NI_DB_JBUS_MASK2: |
| 4610 | ASSIGN_IOB( db_jbus_mask2, MASK64(45,0) ); |
| 4611 | break; |
| 4612 | case NI_DB_JBUS_MASK3: |
| 4613 | ASSIGN_IOB( db_jbus_mask3, MASK64(45,0) ); |
| 4614 | break; |
| 4615 | case NI_DB_JBUS_COMPARE0: |
| 4616 | ASSIGN_IOB( db_jbus_compare0, MASK64(43,0) ); |
| 4617 | break; |
| 4618 | case NI_DB_JBUS_COMPARE1: |
| 4619 | ASSIGN_IOB( db_jbus_compare1, MASK64(43,0) ); |
| 4620 | break; |
| 4621 | case NI_DB_JBUS_COMPARE2: |
| 4622 | ASSIGN_IOB( db_jbus_compare2, MASK64(43,0) ); |
| 4623 | break; |
| 4624 | case NI_DB_JBUS_COMPARE3: |
| 4625 | ASSIGN_IOB( db_jbus_compare3, MASK64(43,0) ); |
| 4626 | break; |
| 4627 | case NI_DB_JBUS_COUNT: |
| 4628 | ASSIGN_IOB( db_jbus_count, MASK64(8,0) ); |
| 4629 | break; |
| 4630 | default: |
| 4631 | EXEC_WARNING( ("Attempted write to illegal register in IOB:" |
| 4632 | "Write 0x%llx to register offset 0x%x", |
| 4633 | val, reg ) ); |
| 4634 | goto access_failed; /* illegal reg - an error */ |
| 4635 | } |
| 4636 | break; |
| 4637 | |
| 4638 | write_reserved: |
| 4639 | EXEC_WARNING( ("Attempted write to reserved field in IOB:" |
| 4640 | "Write 0x%llx to register %s (offset 0x%x)", |
| 4641 | val, ss_iob_reg_name(reg), reg ) ); |
| 4642 | pthread_mutex_unlock( &iobp->iob_lock ); |
| 4643 | return true; |
| 4644 | |
| 4645 | case MA_ldu64: |
| 4646 | #define RETRIEVE_IOB(_n, _m) do { val = ((iobp->_n) & (_m)); } while (0) |
| 4647 | |
| 4648 | switch (reg) { |
| 4649 | case NI_INT_MAN0: /* internal */ |
| 4650 | case NI_INT_MAN1: /* errors */ |
| 4651 | case NI_INT_MAN2: /* SSI */ |
| 4652 | case NI_INT_MAN3: /* reserved */ |
| 4653 | val = iobp->int_man[device]; |
| 4654 | ASSERT( 0LL == (val & ~(MASK64(12,8)|MASK64(5,0))) ); |
| 4655 | break; |
| 4656 | |
| 4657 | case NI_INT_CTL0: /* internal */ |
| 4658 | case NI_INT_CTL1: /* errors */ |
| 4659 | case NI_INT_CTL2: /* SSI */ |
| 4660 | case NI_INT_CTL3: /* reserved */ |
| 4661 | val = iobp->int_ctl[device]; |
| 4662 | ASSERT( 0LL == (val & ~0x5)); |
| 4663 | break; |
| 4664 | |
| 4665 | case NI_J_INT_VEC: |
| 4666 | RETRIEVE_IOB( j_int_vec, MASK64(5,0) ); |
| 4667 | break; |
| 4668 | case NI_INT_VEC_DIS: |
| 4669 | EXEC_WARNING( ("Attempted read to WO register in IOB: %s", |
| 4670 | ss_iob_reg_name(reg)) ); |
| 4671 | goto access_failed; |
| 4672 | case NI_RSET_STAT: |
| 4673 | RETRIEVE_IOB( rset_stat, MASK64(11,9)|MASK64(3,1) ); |
| 4674 | break; |
| 4675 | case NI_TM_STAT_CTL: |
| 4676 | RETRIEVE_IOB( tm_stat_ctl, MASK64(63,63)|MASK64(31,0) ); |
| 4677 | break; |
| 4678 | case NI_PROC_SER_NUM: |
| 4679 | RETRIEVE_IOB( proc_ser_num, MASK64(63,0) ); |
| 4680 | break; |
| 4681 | case NI_CORE_AVAIL: |
| 4682 | val = npp->core_avail; |
| 4683 | break; |
| 4684 | case NI_IOB_FUSE: |
| 4685 | RETRIEVE_IOB( iob_fuse, MASK64(31,0) ); |
| 4686 | break; |
| 4687 | case NI_INT_MRGN_REG: |
| 4688 | RETRIEVE_IOB( int_mrgn_reg, MASK64(4,0) ); |
| 4689 | break; |
| 4690 | case NI_L2_VIS_CONTROL: |
| 4691 | RETRIEVE_IOB( l2_vis_control, MASK64(3,0) ); |
| 4692 | break; |
| 4693 | case NI_L2_VIS_MASK_A: |
| 4694 | RETRIEVE_IOB( l2_vis_mask_a, MASK64(51,48)|MASK64(44,40)| |
| 4695 | MASK64(33,8)|MASK64(5,2) ); |
| 4696 | break; |
| 4697 | case NI_L2_VIS_MASK_B: |
| 4698 | RETRIEVE_IOB( l2_vis_mask_b, MASK64(51,48)|MASK64(44,40)| |
| 4699 | MASK64(33,8)|MASK64(5,2) ); |
| 4700 | break; |
| 4701 | case NI_L2_VIS_COMPARE_A: |
| 4702 | RETRIEVE_IOB( l2_vis_compare_a, MASK64(51,48)|MASK64(44,40)| |
| 4703 | MASK64(33,8)|MASK64(5,2) ); |
| 4704 | break; |
| 4705 | case NI_L2_VIS_COMPARE_B: |
| 4706 | RETRIEVE_IOB( l2_vis_compare_b, MASK64(51,48)|MASK64(44,40)| |
| 4707 | MASK64(33,8)|MASK64(5,2) ); |
| 4708 | break; |
| 4709 | case NI_L2_TRIG_DELAY: |
| 4710 | RETRIEVE_IOB( l2_trig_delay, MASK64(31,0) ); |
| 4711 | break; |
| 4712 | case NI_IOB_VIS_SELECT: |
| 4713 | RETRIEVE_IOB( iob_vis_select, MASK64(3,0) ); |
| 4714 | break; |
| 4715 | case NI_DB_ENET_CONTROL: |
| 4716 | RETRIEVE_IOB( db_enet_control, MASK64(8,8)|MASK64(6,5)| |
| 4717 | MASK64(3,0) ); |
| 4718 | break; |
| 4719 | case NI_DB_ENET_IDLEVAL: |
| 4720 | RETRIEVE_IOB( db_enet_idleval, MASK64(39,0) ); |
| 4721 | break; |
| 4722 | case NI_DB_JBUS_CONTROL: |
| 4723 | RETRIEVE_IOB( db_jbus_control, MASK64(16,16)|MASK64(6,4)| |
| 4724 | MASK64(2,0) ); |
| 4725 | break; |
| 4726 | case NI_DB_JBUS_MASK0: |
| 4727 | RETRIEVE_IOB( db_jbus_mask0, MASK64(45,0) ); |
| 4728 | break; |
| 4729 | case NI_DB_JBUS_MASK1: |
| 4730 | RETRIEVE_IOB( db_jbus_mask1, MASK64(45,0) ); |
| 4731 | break; |
| 4732 | case NI_DB_JBUS_MASK2: |
| 4733 | RETRIEVE_IOB( db_jbus_mask2, MASK64(45,0) ); |
| 4734 | break; |
| 4735 | case NI_DB_JBUS_MASK3: |
| 4736 | RETRIEVE_IOB( db_jbus_mask3, MASK64(45,0) ); |
| 4737 | break; |
| 4738 | case NI_DB_JBUS_COMPARE0: |
| 4739 | RETRIEVE_IOB( db_jbus_compare0, MASK64(43,0) ); |
| 4740 | break; |
| 4741 | case NI_DB_JBUS_COMPARE1: |
| 4742 | RETRIEVE_IOB( db_jbus_compare1, MASK64(43,0) ); |
| 4743 | break; |
| 4744 | case NI_DB_JBUS_COMPARE2: |
| 4745 | RETRIEVE_IOB( db_jbus_compare2, MASK64(43,0) ); |
| 4746 | break; |
| 4747 | case NI_DB_JBUS_COMPARE3: |
| 4748 | RETRIEVE_IOB( db_jbus_compare3, MASK64(43,0) ); |
| 4749 | break; |
| 4750 | case NI_DB_JBUS_COUNT: |
| 4751 | RETRIEVE_IOB( db_jbus_count, MASK64(8,0) ); |
| 4752 | break; |
| 4753 | default: |
| 4754 | goto access_failed; /* illegal reg - an error */ |
| 4755 | } |
| 4756 | |
| 4757 | DBGSSI( lprintf(sp->gid, "read from iob reg: 0x%x (%s) value 0x%LLx\n", reg, ss_iob_reg_name(reg), val);); |
| 4758 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 4759 | *regp = val; |
| 4760 | break; |
| 4761 | |
| 4762 | default: |
| 4763 | ASSERT(0); |
| 4764 | } |
| 4765 | |
| 4766 | pthread_mutex_unlock( &iobp->iob_lock ); |
| 4767 | return true; |
| 4768 | |
| 4769 | access_failed:; |
| 4770 | pthread_mutex_unlock( &iobp->iob_lock ); |
| 4771 | return false; |
| 4772 | } |
| 4773 | |
| 4774 | |
| 4775 | static bool_t ss_clock_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 4776 | { |
| 4777 | uint_t reg; |
| 4778 | uint64_t val; |
| 4779 | ss_proc_t *npp; |
| 4780 | ss_clock_t * clockp; |
| 4781 | |
| 4782 | |
| 4783 | /* |
| 4784 | * FIXME: For the moment we only support 64bit accesses to registers. |
| 4785 | * we need to do better than this, but confirm partial access behaviour |
| 4786 | * with the Niagara team. |
| 4787 | */ |
| 4788 | |
| 4789 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 4790 | |
| 4791 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 4792 | clockp = npp->clockp; |
| 4793 | |
| 4794 | reg = off & 0xff; |
| 4795 | |
| 4796 | switch (op) { |
| 4797 | case MA_st64: |
| 4798 | val = *regp; |
| 4799 | |
| 4800 | #define ASSIGN_CLK(_n, _m) do { \ |
| 4801 | if (0LL != (val & ~(_m))) goto write_reserved; \ |
| 4802 | clockp->_n = val; \ |
| 4803 | } while (0) |
| 4804 | |
| 4805 | switch (reg) { |
| 4806 | /* Clock Unit section 11.1 of PRM 1.2 */ |
| 4807 | case SS_CLOCK_DIVIDER: |
| 4808 | ASSIGN_CLK( divider, MASK64(61,28)|MASK64(26,26)| |
| 4809 | MASK64(20,16)|MASK64(12,8)|MASK64(4,0) ); |
| 4810 | break; |
| 4811 | case SS_CLOCK_CONTROL: |
| 4812 | ASSIGN_CLK( control, MASK64(63,61)|MASK64(54,48)| |
| 4813 | MASK64(34,29)|MASK64(27,27)|MASK64(23,0) ); |
| 4814 | break; |
| 4815 | case SS_CLOCK_DLL_CONTROL: |
| 4816 | ASSIGN_CLK( dll_control, MASK64(44,40)|MASK64(38,38)| |
| 4817 | MASK64(36,32)|MASK64(19,0) ); |
| 4818 | break; |
| 4819 | case SS_CLOCK_JBUS_SYNC: |
| 4820 | ASSIGN_CLK( jbus_sync, MASK64(39,38)|MASK64(36,30)| |
| 4821 | MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) ); |
| 4822 | break; |
| 4823 | case SS_CLOCK_DLL_BYPASS: |
| 4824 | ASSIGN_CLK( dll_bypass, MASK64(61,56)|MASK64(52,48)| |
| 4825 | MASK64(45,40)|MASK64(36,32)|MASK64(29,24)| |
| 4826 | MASK64(20,16)|MASK64(13,8)|MASK64(4,0) ); |
| 4827 | break; |
| 4828 | case SS_CLOCK_DRAM_SYNC: |
| 4829 | ASSIGN_CLK( dram_sync, MASK64(39,38)|MASK64(36,30)| |
| 4830 | MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) ); |
| 4831 | break; |
| 4832 | case SS_CLOCK_VERSION: |
| 4833 | ASSIGN_CLK( version, 0LL ); |
| 4834 | break; |
| 4835 | |
| 4836 | default: |
| 4837 | /* illegal reg - an error */ |
| 4838 | return false; |
| 4839 | } |
| 4840 | break; |
| 4841 | |
| 4842 | write_reserved: |
| 4843 | EXEC_WARNING( ("Attempted write to reserved field in clock unit:" |
| 4844 | "Write 0x%llx to register %s (offset 0x%x)", |
| 4845 | val, ss_clock_reg_name(reg), reg ) ); |
| 4846 | return false; |
| 4847 | |
| 4848 | case MA_ldu64: |
| 4849 | #define RETRIEVE_CLK(_n, _m) do { val = ((clockp->_n) & (_m)); } while (0) |
| 4850 | switch (reg) { |
| 4851 | /* Clock Unit section 11.1 of PRM 1.2 */ |
| 4852 | case SS_CLOCK_DIVIDER: |
| 4853 | RETRIEVE_CLK( divider, MASK64(61,28)|MASK64(26,26)| |
| 4854 | MASK64(20,16)|MASK64(12,8)|MASK64(4,0) ); |
| 4855 | break; |
| 4856 | case SS_CLOCK_CONTROL: |
| 4857 | RETRIEVE_CLK( control, MASK64(63,61)|MASK64(54,48)| |
| 4858 | MASK64(34,29)|MASK64(27,27)|MASK64(23,0) ); |
| 4859 | break; |
| 4860 | case SS_CLOCK_DLL_CONTROL: |
| 4861 | RETRIEVE_CLK( dll_control, MASK64(44,40)|MASK64(38,38)| |
| 4862 | MASK64(36,32)|MASK64(19,0) ); |
| 4863 | break; |
| 4864 | case SS_CLOCK_JBUS_SYNC: |
| 4865 | RETRIEVE_CLK( jbus_sync, MASK64(39,38)|MASK64(36,30)| |
| 4866 | MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) ); |
| 4867 | break; |
| 4868 | case SS_CLOCK_DLL_BYPASS: |
| 4869 | RETRIEVE_CLK( dll_bypass, MASK64(61,56)|MASK64(52,48)| |
| 4870 | MASK64(45,40)|MASK64(36,32)|MASK64(29,24)| |
| 4871 | MASK64(20,16)|MASK64(13,8)|MASK64(4,0) ); |
| 4872 | break; |
| 4873 | case SS_CLOCK_DRAM_SYNC: |
| 4874 | RETRIEVE_CLK( dram_sync, MASK64(39,38)|MASK64(36,30)| |
| 4875 | MASK64(28,22)|MASK64(20,16)|MASK64(12,8)|MASK64(4,0) ); |
| 4876 | break; |
| 4877 | case SS_CLOCK_VERSION: |
| 4878 | RETRIEVE_CLK( version, 0LL ); |
| 4879 | break; |
| 4880 | case SS_DBG_INIT: |
| 4881 | val = 0; |
| 4882 | break; |
| 4883 | |
| 4884 | default: |
| 4885 | /* illegal reg - an error */ |
| 4886 | return false; |
| 4887 | } |
| 4888 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 4889 | *regp = val; |
| 4890 | break; |
| 4891 | |
| 4892 | default: |
| 4893 | ASSERT(0); |
| 4894 | } |
| 4895 | |
| 4896 | return true; |
| 4897 | } |
| 4898 | |
| 4899 | static bool_t ss_l2_ctl_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 4900 | { |
| 4901 | ss_proc_t * npp; |
| 4902 | uint_t reg, bank; |
| 4903 | uint64_t val; |
| 4904 | ss_l2_cache_t * l2p; |
| 4905 | |
| 4906 | |
| 4907 | /* |
| 4908 | * FIXME: For the moment we only support 64bit accesses to registers. |
| 4909 | * we need to do better than this, but confirm partial access behaviour |
| 4910 | * with the Niagara team. |
| 4911 | */ |
| 4912 | |
| 4913 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 4914 | |
| 4915 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 4916 | l2p = npp->l2p; |
| 4917 | |
| 4918 | bank = (off >> 6) & 0x3; |
| 4919 | reg = (off >> 32) & 0xf; |
| 4920 | |
| 4921 | switch (op) { |
| 4922 | case MA_st64: |
| 4923 | val = *regp; |
| 4924 | |
| 4925 | if (reg >= 0x8) { |
| 4926 | #define ASSIGN_L2(_n, _m) do { \ |
| 4927 | if (0LL != (val & ~(_m))) goto write_reserved; \ |
| 4928 | l2p->_n[bank] = val; \ |
| 4929 | } while (0) |
| 4930 | switch (reg) { |
| 4931 | /* L2 BIST Control Reg section 18.7.2 of PRM 1.4 */ |
| 4932 | case SS_L2_TAG_BIST: |
| 4933 | ASSIGN_L2( bist_ctl, MASK64(6,0) ); |
| 4934 | if (val & 1) l2p->bist_ctl[bank] |= 0x400; |
| 4935 | break; |
| 4936 | /* L2 Control Register section 18.5.1 of PRM 1.2 */ |
| 4937 | case SS_L2_CONTROL: |
| 4938 | ASSIGN_L2( control, MASK64(21,0) ); |
| 4939 | break; |
| 4940 | /* Error handling section 12.6 of PRM 1.1 */ |
| 4941 | case SS_L2_ERROR_ENABLE: |
| 4942 | ASSIGN_L2( error_enable, MASK64(2,0) ); |
| 4943 | break; |
| 4944 | case SS_L2_ERROR_STATUS: |
| 4945 | l2p->error_status[bank] &= ~val; |
| 4946 | l2p->error_status[bank] &= |
| 4947 | MASK64(63,62)|MASK64(53,35); |
| 4948 | l2p->error_status[bank] |= val & |
| 4949 | (MASK64(61,61)|MASK64(59,54)|MASK64(31,0)); |
| 4950 | break; |
| 4951 | case SS_L2_ERROR_ADDRESS: |
| 4952 | ASSIGN_L2( error_address, MASK64(39,4) ); |
| 4953 | break; |
| 4954 | case SS_L2_ERROR_INJECT: |
| 4955 | ASSIGN_L2( error_inject, MASK64(1,0) ); |
| 4956 | break; |
| 4957 | default: |
| 4958 | /* illegal reg - an error */ |
| 4959 | return false; |
| 4960 | } |
| 4961 | } else |
| 4962 | /* L2 Cache Diagnostic Access section 18.6 of PRM 1.2 */ |
| 4963 | if (reg < 0x4) { |
| 4964 | uint64_t idx; |
| 4965 | |
| 4966 | /* index stores to a 32bit word and its ECC+rsvd bits */ |
| 4967 | idx = off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2; |
| 4968 | /* put oddeven select bit low so data is in addr order */ |
| 4969 | idx |= ((off >> L2_ODDEVEN_SHIFT) & 1); |
| 4970 | l2p->diag_datap[idx] = val; |
| 4971 | |
| 4972 | } else |
| 4973 | if (reg < 0x6) { |
| 4974 | uint64_t idx; |
| 4975 | |
| 4976 | /*index stores to a tag and its ECC+rsvd bits */ |
| 4977 | idx = off & (L2_WAY | L2_LINE | L2_BANK) >> 6; |
| 4978 | l2p->diag_tagp[idx] = val; |
| 4979 | } else { |
| 4980 | uint64_t idx; |
| 4981 | |
| 4982 | /* index valid/dirty or alloc/used bits and parity */ |
| 4983 | idx = off & (L2_LINE | L2_BANK) >> 6; |
| 4984 | idx |= ((off & L2_VDSEL) >> 10); |
| 4985 | l2p->diag_vuadp[idx] = val; |
| 4986 | } |
| 4987 | |
| 4988 | break; |
| 4989 | |
| 4990 | write_reserved: |
| 4991 | EXEC_WARNING( ("Attempted write to reserved field in l2 cache controller:" |
| 4992 | "Write 0x%llx to bank %d, register %s (offset 0x%x)", |
| 4993 | val, bank, ss_l2_ctrl_reg_name(reg), reg ) ); |
| 4994 | return false; |
| 4995 | |
| 4996 | case MA_ldu64: |
| 4997 | if (reg >= 0x8) { |
| 4998 | #define RETRIEVE_L2(_n, _m) do { val = ((l2p->_n[bank]) & (_m)); } while (0) |
| 4999 | switch (reg) { |
| 5000 | /* L2 BIST Control Reg section 18.7.2 of PRM 1.4 */ |
| 5001 | case SS_L2_TAG_BIST: |
| 5002 | RETRIEVE_L2( bist_ctl, MASK64(10,0) ); |
| 5003 | break; |
| 5004 | /* L2 Control Register section 18.5.1 of PRM 1.2 */ |
| 5005 | case SS_L2_CONTROL: |
| 5006 | RETRIEVE_L2( control, MASK64(63,57)|MASK64(15,0) ); |
| 5007 | break; |
| 5008 | /* Error handling section 12.6 of PRM 1.1 */ |
| 5009 | case SS_L2_ERROR_ENABLE: |
| 5010 | RETRIEVE_L2( error_enable, MASK64(2,0) ); |
| 5011 | break; |
| 5012 | case SS_L2_ERROR_STATUS: |
| 5013 | RETRIEVE_L2( error_status, |
| 5014 | MASK64(63,61)|MASK64(59,35)|MASK64(31,0) ); |
| 5015 | break; |
| 5016 | case SS_L2_ERROR_ADDRESS: |
| 5017 | RETRIEVE_L2( error_address, MASK64(39,4) ); |
| 5018 | break; |
| 5019 | case SS_L2_ERROR_INJECT: |
| 5020 | RETRIEVE_L2( error_inject, MASK64(1,0) ); |
| 5021 | break; |
| 5022 | default: |
| 5023 | /* illegal reg - an error */ |
| 5024 | return false; |
| 5025 | } |
| 5026 | } else |
| 5027 | /* L2 Cache Diagnostic Access section 18.6 of PRM 1.2 */ |
| 5028 | if (reg < 0x4) { |
| 5029 | uint64_t idx; |
| 5030 | |
| 5031 | /* index retrieves a 32bit word and its ECC+rsvd bits */ |
| 5032 | idx = off & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2; |
| 5033 | /* put oddeven select bit low so data is in addr order */ |
| 5034 | idx |= ((off >> L2_ODDEVEN_SHIFT) & 1); |
| 5035 | val = l2p->diag_datap[idx]; |
| 5036 | |
| 5037 | } else |
| 5038 | if (reg < 0x6) { |
| 5039 | uint64_t idx; |
| 5040 | |
| 5041 | /* index retrieves a tag and its ECC+rsvd bits */ |
| 5042 | idx = off & (L2_WAY | L2_LINE | L2_BANK) >> 6; |
| 5043 | val = l2p->diag_tagp[idx]; |
| 5044 | } else { |
| 5045 | uint64_t idx; |
| 5046 | |
| 5047 | /* index valid/dirty or alloc/used bits and parity */ |
| 5048 | idx = off & (L2_LINE | L2_BANK) >> 6; |
| 5049 | idx |= ((off & L2_VDSEL) >> 10); |
| 5050 | val = l2p->diag_vuadp[idx]; |
| 5051 | } |
| 5052 | |
| 5053 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 5054 | *regp = val; |
| 5055 | break; |
| 5056 | |
| 5057 | default: |
| 5058 | ASSERT(0); |
| 5059 | } |
| 5060 | |
| 5061 | return true; |
| 5062 | } |
| 5063 | |
| 5064 | static bool_t ss_dram_ctl_access(simcpu_t *sp, config_addr_t * config_addrp, tpaddr_t off, maccess_t op, uint64_t * regp) |
| 5065 | { |
| 5066 | ss_proc_t * npp; |
| 5067 | uint_t reg, bank; |
| 5068 | uint64_t val; |
| 5069 | ss_dram_bank_t * dbp; |
| 5070 | |
| 5071 | /* |
| 5072 | * FIXME: For the moment we only support 64bit accesses to registers. |
| 5073 | * we need to do better than this, but confirm partial access behaviour |
| 5074 | * with the Niagara team. |
| 5075 | */ |
| 5076 | |
| 5077 | npp = (ss_proc_t *)config_addrp->config_devp->devp; |
| 5078 | |
| 5079 | if (MA_ldu64!=op && MA_st64!=op) return false; |
| 5080 | |
| 5081 | bank = off >> 12; |
| 5082 | ASSERT (bank < npp->num_mbanks); /* this should be enforced by the config_dev range */ |
| 5083 | |
| 5084 | dbp = &(npp->mbankp[bank]); |
| 5085 | |
| 5086 | DBGMC( lprintf(sp->gid, "Memory controller bank %d : register %s\n", bank, ss_dram_ctrl_reg_name(reg)); ); |
| 5087 | |
| 5088 | reg = off & ((1<<12)-1); |
| 5089 | |
| 5090 | switch (op) { |
| 5091 | case MA_st64: |
| 5092 | val = *regp; |
| 5093 | |
| 5094 | #define ASSIGN_DB(_n, _m) do { \ |
| 5095 | dbp->_n &= ~(_m); \ |
| 5096 | dbp->_n |= (val & (_m)); \ |
| 5097 | } while (0) |
| 5098 | |
| 5099 | switch (reg) { |
| 5100 | /* DRAM controller section 15.5 of RPM 1.1 */ |
| 5101 | case SS_DRAM_CAS_ADDR_WIDTH: ASSIGN_DB( cas_addr_width, MASK64(3, 0) ); break; |
| 5102 | case SS_DRAM_RAS_ADDR_WIDTH: ASSIGN_DB( ras_addr_width, MASK64(3, 0) ); break; |
| 5103 | case SS_DRAM_CAS_LAT: ASSIGN_DB( cas_lat, MASK64(2, 0) ); break; |
| 5104 | case SS_DRAM_SCRUB_FREQ: ASSIGN_DB( scrub_freq, MASK64(11, 0) ); break; |
| 5105 | case SS_DRAM_REFRESH_FREQ: ASSIGN_DB( refresh_freq, MASK64(12, 0) ); break; |
| 5106 | case SS_DRAM_REFRESH_COUNTER: ASSIGN_DB( refresh_counter, MASK64(12, 0) ); break; |
| 5107 | case SS_DRAM_SCRUB_ENABLE: ASSIGN_DB( scrub_enable, MASK64(0, 0) ); break; |
| 5108 | case SS_DRAM_TRRD: ASSIGN_DB( trrd, MASK64(3, 0) ); break; |
| 5109 | case SS_DRAM_TRC: ASSIGN_DB( trc, MASK64(4, 0) ); break; |
| 5110 | case SS_DRAM_DRAM_TRCD: ASSIGN_DB( dram_trcd, MASK64(3, 0) ); break; |
| 5111 | case SS_DRAM_TWTR: ASSIGN_DB( twtr, MASK64(3, 0) ); break; |
| 5112 | case SS_DRAM_TRTW: ASSIGN_DB( trtw, MASK64(3, 0) ); break; |
| 5113 | case SS_DRAM_TRTP: ASSIGN_DB( trtp, MASK64(2, 0) ); break; |
| 5114 | case SS_DRAM_TRAS: ASSIGN_DB( tras, MASK64(3, 0) ); break; |
| 5115 | case SS_DRAM_TRP: ASSIGN_DB( trp, MASK64(3, 0) ); break; |
| 5116 | case SS_DRAM_TWR: ASSIGN_DB( twr, MASK64(3, 0) ); break; |
| 5117 | case SS_DRAM_TRFC: ASSIGN_DB( trfc, MASK64(6, 0) ); break; |
| 5118 | case SS_DRAM_TMRD: ASSIGN_DB( tmrd, MASK64(1, 0) ); break; |
| 5119 | case SS_DRAM_TIWTR: ASSIGN_DB( tiwtr, MASK64(1, 0) ); break; |
| 5120 | case SS_DRAM_PRECHARGE_WAIT: ASSIGN_DB( precharge_wait, MASK64(7, 0) ); break; |
| 5121 | case SS_DRAM_DIMM_STACK: ASSIGN_DB( dimm_stack, MASK64(0, 0) ); break; |
| 5122 | case SS_DRAM_EXT_WR_MODE2: ASSIGN_DB( ext_wr_mode2, MASK64(14, 0) ); break; |
| 5123 | case SS_DRAM_EXT_WR_MODE1: ASSIGN_DB( ext_wr_mode1, MASK64(14, 0) ); break; |
| 5124 | case SS_DRAM_EXT_WR_MODE3: ASSIGN_DB( ext_wr_mode3, MASK64(14, 0) ); break; |
| 5125 | case SS_DRAM_WAIR_CONTROL: ASSIGN_DB( wair_control, MASK64(0, 0) ); break; |
| 5126 | case SS_DRAM_RANK1_PRESENT: ASSIGN_DB( rank1_present, MASK64(0, 0) ); break; |
| 5127 | case SS_DRAM_CHANNEL_DISABLED: ASSIGN_DB( channel_disabled, MASK64(0, 0) ); break; |
| 5128 | case SS_DRAM_SEL_LO_ADDR_BITS: ASSIGN_DB( sel_lo_addr_bits, MASK64(0, 0) ); break; |
| 5129 | case SS_DRAM_DIMM_INIT: |
| 5130 | if (0LL != (val & ~(7))) goto write_reserved; |
| 5131 | dbp->dimm_init = val; |
| 5132 | /* DRAM Init sequence done is instantaneous */ |
| 5133 | dbp->init_status = 1; |
| 5134 | break; |
| 5135 | |
| 5136 | case SS_DRAM_SW_DV_COUNT: ASSIGN_DB( sw_dv_count, MASK64(2, 0) ); break; |
| 5137 | case SS_DRAM_HW_DMUX_CLK_INV: ASSIGN_DB( hw_dmux_clk_inv, MASK64(0, 0) ); break; |
| 5138 | case SS_DRAM_PAD_EN_CLK_INV: ASSIGN_DB( pad_en_clk_inv, MASK64(4, 0) ); break; |
| 5139 | case SS_DRAM_MODE_WRITE_STATUS: ASSIGN_DB( mode_write_status, MASK64(0, 0) ); break; |
| 5140 | case SS_DRAM_INIT_STATUS: ASSIGN_DB( init_status, MASK64(0, 0) ); break; |
| 5141 | case SS_DRAM_DIMM_PRESENT: ASSIGN_DB( dimm_present, MASK64(3, 0) ); break; |
| 5142 | case SS_DRAM_FAILOVER_STATUS: ASSIGN_DB( failover_status, MASK64(0, 0) ); break; |
| 5143 | case SS_DRAM_FAILOVER_MASK: ASSIGN_DB( failover_mask, MASK64(34, 0) ); break; |
| 5144 | |
| 5145 | /* Performance counter section 10.3 of PRM 1.1 */ |
| 5146 | case SS_DRAM_PERF_CTL: ASSIGN_DB( perf_ctl, MASK64(7, 0) ); break; |
| 5147 | case SS_DRAM_PERF_COUNT: ASSIGN_DB( perf_count, MASK64(63, 0) ); break; |
| 5148 | |
| 5149 | /* Error handling section 12.9 of PRM 1.1 */ |
| 5150 | case SS_DRAM_ERROR_STATUS: |
| 5151 | dbp->error_status &= ~val; |
| 5152 | dbp->error_status &= MASK64(63,57); |
| 5153 | dbp->error_status |= val & MASK64(15,0); |
| 5154 | break; |
| 5155 | case SS_DRAM_ERROR_ADDRESS: ASSIGN_DB( error_address, MASK64(39,4) ); break; |
| 5156 | case SS_DRAM_ERROR_INJECT: ASSIGN_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break; |
| 5157 | case SS_DRAM_ERROR_COUNTER: ASSIGN_DB( error_counter, MASK64(17,0) ); break; |
| 5158 | case SS_DRAM_ERROR_LOCATION: ASSIGN_DB( error_location, MASK64(35,0) ); break; |
| 5159 | |
| 5160 | /* Power management section 16.2 of PRM 1.1 */ |
| 5161 | case SS_DRAM_OPEN_BANK_MAX: ASSIGN_DB( open_bank_max, MASK64(16, 0) ); break; |
| 5162 | case SS_DRAM_PROG_TIME_CNTR: ASSIGN_DB( prog_time_cntr, MASK64(15, 0) ); break; |
| 5163 | |
| 5164 | /* Hardware debug section 19.1 of PRM 1.1 */ |
| 5165 | case SS_DRAM_DBG_TRG_EN: ASSIGN_DB( dbg_trg_en, MASK64(7, 0) ); break; |
| 5166 | default: |
| 5167 | /* illegal reg - an error */ |
| 5168 | return false; |
| 5169 | } |
| 5170 | break; |
| 5171 | |
| 5172 | write_reserved: |
| 5173 | EXEC_WARNING( ("Attempted write to reserved field in dram controller: Write 0x%llx to bank %d, register %s (offset 0x%x)", |
| 5174 | val, bank, ss_dram_ctrl_reg_name(reg), reg ) ); |
| 5175 | return false; |
| 5176 | |
| 5177 | case MA_ldu64: |
| 5178 | #define RETRIEVE_DB(_n, _m) do { val = ((dbp->_n) & (_m)); } while (0) |
| 5179 | switch (reg) { |
| 5180 | /* DRAM controller section 15.5 of RPM 1.1 */ |
| 5181 | case SS_DRAM_CAS_ADDR_WIDTH: RETRIEVE_DB( cas_addr_width, MASK64(3, 0) ); break; |
| 5182 | case SS_DRAM_RAS_ADDR_WIDTH: RETRIEVE_DB( ras_addr_width, MASK64(3, 0) ); break; |
| 5183 | case SS_DRAM_CAS_LAT: RETRIEVE_DB( cas_lat, MASK64(2, 0) ); break; |
| 5184 | case SS_DRAM_SCRUB_FREQ: RETRIEVE_DB( scrub_freq, MASK64(11, 0) ); break; |
| 5185 | case SS_DRAM_REFRESH_FREQ: RETRIEVE_DB( refresh_freq, MASK64(12, 0) ); break; |
| 5186 | case SS_DRAM_REFRESH_COUNTER: RETRIEVE_DB( refresh_counter, MASK64(12, 0) ); break; |
| 5187 | case SS_DRAM_SCRUB_ENABLE: RETRIEVE_DB( scrub_enable, MASK64(0, 0) ); break; |
| 5188 | case SS_DRAM_TRRD: RETRIEVE_DB( trrd, MASK64(3, 0) ); break; |
| 5189 | case SS_DRAM_TRC: RETRIEVE_DB( trc, MASK64(4, 0) ); break; |
| 5190 | case SS_DRAM_DRAM_TRCD: RETRIEVE_DB( dram_trcd, MASK64(3, 0) ); break; |
| 5191 | case SS_DRAM_TWTR: RETRIEVE_DB( twtr, MASK64(3, 0) ); break; |
| 5192 | case SS_DRAM_TRTW: RETRIEVE_DB( trtw, MASK64(3, 0) ); break; |
| 5193 | case SS_DRAM_TRTP: RETRIEVE_DB( trtp, MASK64(2, 0) ); break; |
| 5194 | case SS_DRAM_TRAS: RETRIEVE_DB( tras, MASK64(3, 0) ); break; |
| 5195 | case SS_DRAM_TRP: RETRIEVE_DB( trp, MASK64(3, 0) ); break; |
| 5196 | case SS_DRAM_TWR: RETRIEVE_DB( twr, MASK64(3, 0) ); break; |
| 5197 | case SS_DRAM_TRFC: RETRIEVE_DB( trfc, MASK64(6, 0) ); break; |
| 5198 | case SS_DRAM_TMRD: RETRIEVE_DB( tmrd, MASK64(1, 0) ); break; |
| 5199 | case SS_DRAM_TIWTR: RETRIEVE_DB( tiwtr, MASK64(1, 0) ); break; |
| 5200 | case SS_DRAM_PRECHARGE_WAIT: RETRIEVE_DB( precharge_wait, MASK64(7, 0) ); break; |
| 5201 | case SS_DRAM_DIMM_STACK: RETRIEVE_DB( dimm_stack, MASK64(0, 0) ); break; |
| 5202 | case SS_DRAM_EXT_WR_MODE2: RETRIEVE_DB( ext_wr_mode2, MASK64(14, 0) ); break; |
| 5203 | case SS_DRAM_EXT_WR_MODE1: RETRIEVE_DB( ext_wr_mode1, MASK64(14, 0) ); break; |
| 5204 | case SS_DRAM_EXT_WR_MODE3: RETRIEVE_DB( ext_wr_mode3, MASK64(14, 0) ); break; |
| 5205 | case SS_DRAM_WAIR_CONTROL: RETRIEVE_DB( wair_control, MASK64(0, 0) ); break; |
| 5206 | case SS_DRAM_RANK1_PRESENT: RETRIEVE_DB( rank1_present, MASK64(0, 0) ); break; |
| 5207 | case SS_DRAM_CHANNEL_DISABLED: RETRIEVE_DB( channel_disabled, MASK64(0, 0) ); break; |
| 5208 | case SS_DRAM_SEL_LO_ADDR_BITS: RETRIEVE_DB( sel_lo_addr_bits, MASK64(0, 0) ); break; |
| 5209 | case SS_DRAM_DIMM_INIT: RETRIEVE_DB( dimm_init, MASK64(2, 0) ); break; |
| 5210 | case SS_DRAM_SW_DV_COUNT: RETRIEVE_DB( sw_dv_count, MASK64(2, 0) ); break; |
| 5211 | case SS_DRAM_HW_DMUX_CLK_INV: RETRIEVE_DB( hw_dmux_clk_inv, MASK64(0, 0) ); break; |
| 5212 | case SS_DRAM_PAD_EN_CLK_INV: RETRIEVE_DB( pad_en_clk_inv, MASK64(4, 0) ); break; |
| 5213 | case SS_DRAM_MODE_WRITE_STATUS: RETRIEVE_DB( mode_write_status, MASK64(0, 0) ); break; |
| 5214 | case SS_DRAM_INIT_STATUS: RETRIEVE_DB( init_status, MASK64(0, 0) ); break; |
| 5215 | case SS_DRAM_DIMM_PRESENT: RETRIEVE_DB( dimm_present, MASK64(3, 0) ); break; |
| 5216 | case SS_DRAM_FAILOVER_STATUS: RETRIEVE_DB( failover_status, MASK64(0, 0) ); break; |
| 5217 | case SS_DRAM_FAILOVER_MASK: RETRIEVE_DB( failover_mask, MASK64(34, 0) ); break; |
| 5218 | |
| 5219 | /* Performance counter section 10.3 of PRM 1.1 */ |
| 5220 | case SS_DRAM_PERF_CTL: RETRIEVE_DB( perf_ctl, MASK64(7, 0) ); break; |
| 5221 | case SS_DRAM_PERF_COUNT: RETRIEVE_DB( perf_count, MASK64(63, 0) ); break; |
| 5222 | |
| 5223 | /* Error handling section 12.9 of PRM 1.1 */ |
| 5224 | case SS_DRAM_ERROR_STATUS: RETRIEVE_DB( error_status, MASK64(63,57)|MASK64(15,0) ); break; |
| 5225 | case SS_DRAM_ERROR_ADDRESS: RETRIEVE_DB( error_address, MASK64(39,4) ); break; |
| 5226 | case SS_DRAM_ERROR_INJECT: RETRIEVE_DB( error_inject, MASK64(31,30)|MASK64(15,0) ); break; |
| 5227 | case SS_DRAM_ERROR_COUNTER: RETRIEVE_DB( error_counter, MASK64(17,0) ); break; |
| 5228 | case SS_DRAM_ERROR_LOCATION: RETRIEVE_DB( error_location, MASK64(35,0) ); break; |
| 5229 | |
| 5230 | /* Power management section 16.2 of PRM 1.1 */ |
| 5231 | case SS_DRAM_OPEN_BANK_MAX: RETRIEVE_DB( open_bank_max, MASK64(16, 0) ); break; |
| 5232 | case SS_DRAM_PROG_TIME_CNTR: RETRIEVE_DB( prog_time_cntr, MASK64(15, 0) ); break; |
| 5233 | |
| 5234 | /* Hardware debug section 19.1 of PRM 1.1 */ |
| 5235 | case SS_DRAM_DBG_TRG_EN: RETRIEVE_DB( dbg_trg_en, MASK64(7, 0) ); break; |
| 5236 | default: |
| 5237 | /* illegal reg - an error */ |
| 5238 | return false; |
| 5239 | } |
| 5240 | if (&(sp->intreg[Reg_sparcv9_g0]) != regp) |
| 5241 | *regp = val; |
| 5242 | break; |
| 5243 | |
| 5244 | default: |
| 5245 | ASSERT(0); |
| 5246 | } |
| 5247 | |
| 5248 | return true; |
| 5249 | } |
| 5250 | |
| 5251 | |
| 5252 | |
| 5253 | /**************************************************************** |
| 5254 | * |
| 5255 | * SunSPARC CPU interrupt bridge code |
| 5256 | * |
| 5257 | ****************************************************************/ |
| 5258 | |
| 5259 | /* write to SS_ASI_SWVR_UDB_INTR_W */ |
| 5260 | |
| 5261 | /* FIXMENOW .... this function to go away ... use ss_ext_signal .... */ |
| 5262 | |
| 5263 | |
| 5264 | static void niagara_send_xirq(simcpu_t * sp, uint64_t val) |
| 5265 | { |
| 5266 | uint_t strand, core; |
| 5267 | uint_t vec_bit; |
| 5268 | uint_t type; |
| 5269 | ss_strand_t * tstrandp; |
| 5270 | ss_proc_t * npp; |
| 5271 | bool_t pay_attention; |
| 5272 | |
| 5273 | npp = (ss_proc_t *)(sp->config_procp->procp); |
| 5274 | |
| 5275 | type = (val >> 16) & MASK64(1,0); |
| 5276 | /* strand captures reserved field too .. but should be zero ... */ |
| 5277 | if (type != 0) EXEC_WARNING(("Write to SS_ASI_SWVR_UDB_INTR_W with non-zero type field (@pc=0x%llx)", sp->pc)); |
| 5278 | |
| 5279 | /* check actual value against number of strands later ... */ |
| 5280 | strand = (val >> 8) & MASK64(4,0); |
| 5281 | vec_bit = val & MASK64(5,0); |
| 5282 | |
| 5283 | /* normalize strand to internal strand */ |
| 5284 | strand = STRANDID2IDX(npp, strand); |
| 5285 | if (!VALIDIDX(npp, strand)) { |
| 5286 | EXEC_WARNING(("Write to SS_ASI_SWVR_UDB_INTR_W with illegal strand value 0x%llx (@pc=0x%llx)", strand, sp->pc)); |
| 5287 | return; |
| 5288 | } |
| 5289 | |
| 5290 | tstrandp = &(npp->ss_strandp[strand]); |
| 5291 | |
| 5292 | pthread_mutex_lock(&tstrandp->irq_lock); |
| 5293 | pay_attention = (0LL == tstrandp->irq_vector); |
| 5294 | tstrandp->irq_vector |= (1LL<<vec_bit); |
| 5295 | pthread_mutex_unlock(&tstrandp->irq_lock); |
| 5296 | DBGE( lprintf(sp->gid, "irq_send: tstrand=%u irq_vector=%llx " |
| 5297 | "(pc=0x%llx)\n", npp->strand[strand]->simp->gid, |
| 5298 | tstrandp->irq_vector, sp->pc); ); |
| 5299 | |
| 5300 | /* |
| 5301 | * The complicated part here is that the execution thread |
| 5302 | * determines when the interrupt is actually delivered if at |
| 5303 | * all, all we need to do here is to ensure that that thread |
| 5304 | * pays attention to the fact the the interrupt vector status |
| 5305 | * has changed .. we only care if it goes non-zero ... |
| 5306 | */ |
| 5307 | |
| 5308 | if (pay_attention) { |
| 5309 | sparcv9_cpu_t * tv9p; |
| 5310 | simcpu_t * tsp; |
| 5311 | |
| 5312 | tv9p = npp->strand[strand]; |
| 5313 | |
| 5314 | tsp = tv9p->simp; |
| 5315 | tsp->async_event = true; |
| 5316 | } |
| 5317 | } |
| 5318 | |
| 5319 | |
| 5320 | /* |
| 5321 | * non-execution threads to use this method for posting |
| 5322 | * interrupts and other actions to simcpu. |
| 5323 | */ |
| 5324 | |
| 5325 | uint64_t ss_ext_signal(config_proc_t * config_procp, ext_sig_t sigtype, void *vp) |
| 5326 | { |
| 5327 | ss_proc_t *npp; |
| 5328 | simcpu_t *sp; |
| 5329 | sparcv9_cpu_t *v9p; |
| 5330 | ss_strand_t *nsp; |
| 5331 | ss_jbus_t *jbusp; |
| 5332 | jbus_mondo_t *mondop; |
| 5333 | bool_t pay_attention; |
| 5334 | uint_t strand, tidx; |
| 5335 | int i; |
| 5336 | uint64_t intr, ret; |
| 5337 | ss_iob_t *iobp; |
| 5338 | |
| 5339 | npp = (ss_proc_t*)(config_procp->procp); |
| 5340 | |
| 5341 | switch (sigtype) { |
| 5342 | case ES_IDLE: |
| 5343 | /* what if thread not running? */ |
| 5344 | strand = IOB_INT_VEC_THREAD(npp->iobp->int_vec_dis); |
| 5345 | tidx = STRANDID2IDX(npp, strand); |
| 5346 | #if 0 |
| 5347 | lprintf(sp->gid, "IDLE: strand=%d idx=%d\n", strand, tidx)); */ |
| 5348 | #endif |
| 5349 | /* skip strands that do not exist */ |
| 5350 | if (!VALIDIDX(npp, tidx)) |
| 5351 | return (0); |
| 5352 | v9p = npp->strand[tidx]; |
| 5353 | sp = v9p->simp; |
| 5354 | nsp = &(npp->ss_strandp[tidx]); |
| 5355 | pthread_mutex_lock(&npp->thread_sts_lock); |
| 5356 | SET_THREAD_STS_SFSM(npp, nsp, THREAD_STS_TSTATE_IDLE); |
| 5357 | if (!PARKED(sp)) |
| 5358 | simcore_cpu_state_park(sp); |
| 5359 | pthread_mutex_unlock(&npp->thread_sts_lock); |
| 5360 | |
| 5361 | return (0); |
| 5362 | |
| 5363 | case ES_RESUME: |
| 5364 | case ES_RESET: |
| 5365 | /* what if thread not idle? */ |
| 5366 | strand = IOB_INT_VEC_THREAD(npp->iobp->int_vec_dis); |
| 5367 | tidx = STRANDID2IDX(npp, strand); |
| 5368 | /* skip strands that do not exist */ |
| 5369 | if (!VALIDIDX(npp, tidx)) |
| 5370 | return (0); |
| 5371 | v9p = npp->strand[tidx]; |
| 5372 | sp = v9p->simp; |
| 5373 | nsp = &(npp->ss_strandp[tidx]); |
| 5374 | pthread_mutex_lock(&npp->thread_sts_lock); |
| 5375 | SET_THREAD_STS_SFSM(npp, nsp, THREAD_STS_TSTATE_RUN); |
| 5376 | if (PARKED(sp)) |
| 5377 | simcore_cpu_state_unpark(sp); |
| 5378 | pthread_mutex_unlock(&npp->thread_sts_lock); |
| 5379 | |
| 5380 | return (0); |
| 5381 | |
| 5382 | case ES_JBUS: |
| 5383 | jbusp = npp->jbusp; |
| 5384 | mondop = (jbus_mondo_t *)vp; |
| 5385 | tidx = mondop->adr.target; |
| 5386 | |
| 5387 | pthread_mutex_lock(&jbusp->lock); |
| 5388 | if (jbusp->j_int_busy[tidx] & IOB_JBUS_BUSY) { |
| 5389 | pthread_mutex_unlock(&jbusp->lock); |
| 5390 | return IOB_JBUS_NACK; |
| 5391 | } else { |
| 5392 | jbusp->j_int_data0[tidx] = mondop->data0; |
| 5393 | jbusp->j_int_data1[tidx] = mondop->data1; |
| 5394 | jbusp->j_int_busy[tidx] = mondop->adr.source | IOB_JBUS_BUSY; |
| 5395 | } |
| 5396 | pthread_mutex_unlock(&jbusp->lock); |
| 5397 | |
| 5398 | strand = STRANDID2IDX(npp, tidx); |
| 5399 | if (!VALIDIDX(npp, strand)) |
| 5400 | return (IOB_JBUS_NACK); /* XXX */ |
| 5401 | nsp = &(npp->ss_strandp[strand]); |
| 5402 | |
| 5403 | pthread_mutex_lock(&nsp->irq_lock); |
| 5404 | pay_attention = (0LL == nsp->irq_vector); |
| 5405 | nsp->irq_vector |= (uint64_t)1 << npp->iobp->j_int_vec; |
| 5406 | pthread_mutex_unlock(&nsp->irq_lock); |
| 5407 | |
| 5408 | if (pay_attention) { |
| 5409 | v9p = npp->strand[strand]; |
| 5410 | sp = v9p->simp; |
| 5411 | sp->async_event = true; |
| 5412 | } |
| 5413 | |
| 5414 | return IOB_JBUS_ACK; |
| 5415 | |
| 5416 | /* This used to deliver a SSI interrupt event */ |
| 5417 | /* really needs to be handled in a different way */ |
| 5418 | /* FIXME ! */ |
| 5419 | case ES_SSI: |
| 5420 | iobp = npp->iobp; |
| 5421 | |
| 5422 | pthread_mutex_lock(&iobp->iob_lock); |
| 5423 | |
| 5424 | /* If interrupt is masked in IOB simply set again the |
| 5425 | * pending bit ... if not masked, then deliver an |
| 5426 | * interrupt using the irq_vector |
| 5427 | */ |
| 5428 | |
| 5429 | if (iobp->int_ctl[IOB_DEV_SSI]&IOB_INT_CTL_MASK) { |
| 5430 | iobp->int_ctl[IOB_DEV_SSI] |= IOB_INT_CTL_PEND; |
| 5431 | } else { |
| 5432 | /* set MASK bit */ |
| 5433 | iobp->int_ctl[IOB_DEV_SSI] |= IOB_INT_CTL_MASK; |
| 5434 | |
| 5435 | /* now go async deliver the interrupt */ |
| 5436 | strand = IOB_INT_MAN_CPUID(npp->iobp->int_man[IOB_DEV_SSI]); |
| 5437 | nsp = &(npp->ss_strandp[strand]); |
| 5438 | v9p = npp->strand[strand]; |
| 5439 | sp = v9p->simp; |
| 5440 | |
| 5441 | pthread_mutex_lock(&nsp->irq_lock); |
| 5442 | pay_attention = (0LL == nsp->irq_vector); |
| 5443 | nsp->irq_vector |= |
| 5444 | (uint64_t)1<<(iobp->int_man[IOB_DEV_SSI]&INTR_VEC_MASK); |
| 5445 | pthread_mutex_unlock(&nsp->irq_lock); |
| 5446 | DBGSSI( lprintf(sp->gid, "SSI ext_signal: nsp=%p irq_vector=%llx\n", nsp, nsp->irq_vector); ); |
| 5447 | |
| 5448 | if (pay_attention) { |
| 5449 | sp->async_event = true; |
| 5450 | DBGSSI( lprintf(sp->gid, "SSI ext_signal: attention set\n"); ); |
| 5451 | } |
| 5452 | } |
| 5453 | |
| 5454 | pthread_mutex_unlock(&iobp->iob_lock); |
| 5455 | return (0); |
| 5456 | |
| 5457 | case ES_SPOR: |
| 5458 | for (i=(npp->nstrands)-1; i>=0; i--) { |
| 5459 | v9p = npp->strand[i]; |
| 5460 | nsp = (ss_strand_t *)(v9p->impl_specificp); |
| 5461 | nsp->pending_async_tt = SS_trap_power_on_reset; |
| 5462 | sp = v9p->simp; |
| 5463 | sp->exception_pending = true; |
| 5464 | } |
| 5465 | return (0); |
| 5466 | |
| 5467 | case ES_XIR: |
| 5468 | /* |
| 5469 | * OK every strand on this CPU gets a reset signal |
| 5470 | * FIXME: wake up sleeping strands or error state strands |
| 5471 | */ |
| 5472 | for (i=(npp->nstrands)-1; i>=0; i--) { |
| 5473 | v9p = npp->strand[i]; |
| 5474 | nsp = (ss_strand_t *)(v9p->impl_specificp); |
| 5475 | nsp->pending_async_tt = SS_trap_externally_initiated_reset; |
| 5476 | sp = v9p->simp; |
| 5477 | DBGE( lprintf(sp->gid, "ES_XIR set_attention\n"); ); |
| 5478 | sp->exception_pending = true; |
| 5479 | } |
| 5480 | return (0); |
| 5481 | |
| 5482 | default: |
| 5483 | EXEC_WARNING(("processor%d: ext_signal %d ignored", |
| 5484 | config_procp->proc_id, sigtype)); |
| 5485 | return (0); |
| 5486 | } |
| 5487 | } |
| 5488 | |
| 5489 | |
| 5490 | /* |
| 5491 | * CPU specific instruction decode routine. This routine is called from the main |
| 5492 | * instruction decoder routine only when that routine comes up empty handed (i.e. |
| 5493 | * before declaring it an illegal or unknown instruction.) For now, we don't have |
| 5494 | * any CPU specific instuctions implemented for Niagara, and so the performance |
| 5495 | * impact of making this function call is negligable since it doesn't happen in |
| 5496 | * the common case. |
| 5497 | * |
| 5498 | * This routine returns a pointer to the exec function which is to be run as a |
| 5499 | * result of encountering the instruction op code in question. |
| 5500 | */ |
| 5501 | static op_funcp niagara_decode_me(simcpu_t *sp, xicache_instn_t * xcip, uint32_t instn) |
| 5502 | { |
| 5503 | uint_t rs1, rd, rs2; |
| 5504 | sint32_t simm; |
| 5505 | T2o3_code_t op2c; |
| 5506 | op_funcp exec_funcp; |
| 5507 | |
| 5508 | switch ((ty_code_t)X_OP(instn)) { |
| 5509 | case Ty_2: /* Arithmetic and Misc instructions */ |
| 5510 | rs1 = X_RS1(instn); |
| 5511 | rd = X_RD(instn); |
| 5512 | op2c = (T2o3_code_t)X_OP3(instn); |
| 5513 | |
| 5514 | if (X_I(instn)) { |
| 5515 | simm = X_SIMM13(instn); |
| 5516 | /* register x immediate -> register forms */ |
| 5517 | |
| 5518 | switch ( op2c ) { |
| 5519 | case T2o3_mulscc : |
| 5520 | SET_OPv9(mulscc_imm); |
| 5521 | goto n1_do_imm; |
| 5522 | case T2o3_save : |
| 5523 | SET_OPv9(save_imm); /* rd == 0 determined in instn implemenation */ |
| 5524 | goto n1_do_imm; |
| 5525 | case T2o3_restore : |
| 5526 | SET_OPv9(restore_imm); |
| 5527 | goto n1_do_imm; |
| 5528 | case T2o3_rdasr : |
| 5529 | /* Here I = 1 */ |
| 5530 | if (rd == 0 && rs1==15) { |
| 5531 | if (!CHECK_RESERVED_ZERO(instn, 12, 7)) { |
| 5532 | SET_OP_ILL_REASON(misc_reserved_field_non_zero); |
| 5533 | goto n1_illegal_instruction; |
| 5534 | } |
| 5535 | simm = X_MEMBAR_MASKS(instn); |
| 5536 | SET_OP_SIMM16(simm); /* masks in immediates */ |
| 5537 | SET_OPv9( membar ); |
| 5538 | goto n1_all_done; |
| 5539 | } |
| 5540 | /* XXX if I = 1??? */ |
| 5541 | SET_OPv9( read_state_reg ); |
| 5542 | simm = 0; /* unused */ |
| 5543 | goto n1_do_imm; |
| 5544 | case T2o3_return : |
| 5545 | SET_OPv9( return_imm ); |
| 5546 | goto n1_do_imm; |
| 5547 | case T2o3_flush : |
| 5548 | SET_OPv9(iflush_imm); |
| 5549 | goto n1_do_imm; |
| 5550 | case T2o3_movcc : |
| 5551 | if (!X_FMT4_CC2(instn)) { |
| 5552 | #ifdef FP_DECODE_DISABLED |
| 5553 | if (!((sparcv9_cpu_t*)(sp->specificp))->fpu_on) goto n1_fp_disabled; |
| 5554 | #endif /* FP_DECODE_DISABLED */ |
| 5555 | if (rd == 0) goto n1_do_noop; |
| 5556 | |
| 5557 | /* We attempt to fast path movfcc_a ... */ |
| 5558 | if (X_FMT4_COND(instn) == cond_n) goto n1_do_noop; |
| 5559 | simm = X_SIMM11(instn); |
| 5560 | if (X_FMT4_COND(instn) == cond_a) { |
| 5561 | goto n1_do_move_simm; |
| 5562 | } |
| 5563 | SET_OP_MOVCC_CC(X_FMT4_CC(instn)); |
| 5564 | SET_OP_SIMM16(simm); |
| 5565 | SET_OP_RD(rd); |
| 5566 | SET_OP_MOVCC_COND(X_FMT4_COND(instn)); |
| 5567 | SET_OPv9(movfcc_imm); |
| 5568 | goto n1_all_done; |
| 5569 | } |
| 5570 | |
| 5571 | switch( (cc4bit_t)X_FMT4_CC(instn) ) { |
| 5572 | case CC4bit_icc: SET_OP_MOVCC_CC(0); break; |
| 5573 | case CC4bit_xcc: SET_OP_MOVCC_CC(1); break; |
| 5574 | default: |
| 5575 | SET_OP_ILL_REASON(movcc_illegal_cc_field); |
| 5576 | goto n1_illegal_instruction; |
| 5577 | } |
| 5578 | |
| 5579 | if (rd == 0) goto n1_do_noop; |
| 5580 | |
| 5581 | /* truncate simm - as only an 11 bit |
| 5582 | * immediate in movcc instructions, not the |
| 5583 | * 13 bit field we extracted above |
| 5584 | */ |
| 5585 | simm = X_SIMM11(instn); |
| 5586 | |
| 5587 | if (X_FMT4_COND(instn) == cond_n) goto n1_do_noop; |
| 5588 | if (X_FMT4_COND(instn) == cond_a) goto n1_do_move_simm; |
| 5589 | SET_OP_SIMM16(simm); |
| 5590 | SET_OP_RD(rd); |
| 5591 | SET_OP_MOVCC_COND(X_FMT4_COND(instn)); |
| 5592 | SET_OPv9(movcc_imm); |
| 5593 | goto n1_all_done; |
| 5594 | |
| 5595 | case T2o3_saved: |
| 5596 | n1_saved_instn:; |
| 5597 | { |
| 5598 | int fcn = X_FMT2_FCN(instn); |
| 5599 | |
| 5600 | if (!CHECK_RESERVED_ZERO(instn, 18, 0)) { |
| 5601 | SET_OP_ILL_REASON(saved_reserved_field_non_zero); |
| 5602 | goto n1_illegal_instruction; |
| 5603 | } |
| 5604 | |
| 5605 | switch (fcn) { |
| 5606 | case 0: /* saved */ |
| 5607 | SET_OPv9(saved); |
| 5608 | break; |
| 5609 | case 1: |
| 5610 | SET_OPv9(restored); |
| 5611 | break; |
| 5612 | default: |
| 5613 | SET_OP_ILL_REASON(saved_fcn_invalid); |
| 5614 | goto n1_illegal_instruction; |
| 5615 | } |
| 5616 | goto n1_all_done; |
| 5617 | } |
| 5618 | |
| 5619 | case T2o3_retry : |
| 5620 | n1_done_retry_instn:; |
| 5621 | switch(X_FMT3_FCN(instn)) { |
| 5622 | case 0: |
| 5623 | SET_OP_MISC_BITS((uint_t)true); |
| 5624 | break; |
| 5625 | case 1: |
| 5626 | SET_OP_MISC_BITS((uint_t)false); |
| 5627 | break; |
| 5628 | default: |
| 5629 | SET_OP_ILL_REASON(done_retry_illegal_fcn_field); |
| 5630 | goto n1_illegal_instruction; |
| 5631 | } |
| 5632 | SET_OPv9(done_retry); |
| 5633 | goto n1_all_done; |
| 5634 | default: |
| 5635 | break; |
| 5636 | } |
| 5637 | } else { |
| 5638 | rs2 = X_RS2(instn); |
| 5639 | /* register x register -> register forms */ |
| 5640 | switch ( op2c ) { |
| 5641 | case T2o3_mulscc : |
| 5642 | SET_OPv9(mulscc_rrr); |
| 5643 | goto n1_do_rrr; |
| 5644 | case T2o3_save : |
| 5645 | SET_OPv9(save_rrr); /* rd == 0 determined in instn implemenation */ |
| 5646 | goto n1_do_rrr; |
| 5647 | case T2o3_restore : |
| 5648 | /* Rd == 0 handled by instruction */ |
| 5649 | SET_OPv9(restore_rrr); |
| 5650 | goto n1_do_rrr; |
| 5651 | case T2o3_return : |
| 5652 | SET_OPv9( return_rrr ); |
| 5653 | goto n1_do_rrr; |
| 5654 | case T2o3_flush : |
| 5655 | if (rd != 0) |
| 5656 | goto n1_illegal_instruction; |
| 5657 | SET_OPv9(iflush_rr); |
| 5658 | goto n1_do_rrr; |
| 5659 | case T2o3_saved: |
| 5660 | goto n1_saved_instn; |
| 5661 | case T2o3_retry : |
| 5662 | goto n1_done_retry_instn; |
| 5663 | default: |
| 5664 | break; |
| 5665 | } |
| 5666 | } |
| 5667 | default: |
| 5668 | break; |
| 5669 | } |
| 5670 | |
| 5671 | n1_unknown_decode: |
| 5672 | return (NULL); |
| 5673 | |
| 5674 | #ifdef FP_DECODE_DISABLED |
| 5675 | n1_fp_disabled:; |
| 5676 | SET_OPv9(fp_unimplemented_instruction); |
| 5677 | goto n1_all_done; |
| 5678 | #endif /* FP_DECODE_DISABLED */ |
| 5679 | |
| 5680 | |
| 5681 | n1_do_imm: |
| 5682 | SET_OP_RD(rd); |
| 5683 | SET_OP_RS1(rs1); |
| 5684 | SET_OP_SIMM16(simm); |
| 5685 | goto n1_all_done; |
| 5686 | |
| 5687 | n1_do_move_simm: |
| 5688 | SET_OP( move_simm ); |
| 5689 | SET_OP_RD(rd); |
| 5690 | SET_OP_SIMM32(simm); |
| 5691 | goto n1_all_done; |
| 5692 | |
| 5693 | n1_do_rrr: |
| 5694 | SET_OP_RD(rd); |
| 5695 | SET_OP_RS1(rs1); |
| 5696 | SET_OP_RS2(rs2); |
| 5697 | goto n1_all_done; |
| 5698 | |
| 5699 | n1_do_noop: |
| 5700 | SET_OP( noop ); |
| 5701 | goto n1_all_done; |
| 5702 | |
| 5703 | |
| 5704 | n1_illegal_instruction: |
| 5705 | SET_OPv9(illegal_instruction); |
| 5706 | |
| 5707 | n1_all_done:; |
| 5708 | return (exec_funcp); |
| 5709 | |
| 5710 | } |
| 5711 | |
| 5712 | void niagara_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp) |
| 5713 | { |
| 5714 | /* |
| 5715 | * This Niagara specific function is not implemented yet. |
| 5716 | */ |
| 5717 | } |
| 5718 | |
| 5719 | void niagara_domain_check(domain_t * domainp) |
| 5720 | { |
| 5721 | /* |
| 5722 | * This Niagara specific function is not implemented yet. |
| 5723 | */ |
| 5724 | } |
| 5725 | |
| 5726 | void niagara_set_sfsr(simcpu_t *sp, ss_mmu_t *mmup, tvaddr_t addr, |
| 5727 | uint_t ft, ss_ctx_t ct, uint_t asi, uint_t w, uint_t e) |
| 5728 | { |
| 5729 | uint64_t new_sfsr; |
| 5730 | |
| 5731 | new_sfsr = MMU_SFSR_FV; |
| 5732 | if ((mmup->sfsr & MMU_SFSR_FV) != 0) |
| 5733 | new_sfsr |= MMU_SFSR_OW; |
| 5734 | new_sfsr |= (ft << MMU_SFSR_FT_SHIFT); |
| 5735 | new_sfsr |= (ct << MMU_SFSR_CT_SHIFT); |
| 5736 | if (e) |
| 5737 | new_sfsr |= MMU_SFSR_E; |
| 5738 | if (w) |
| 5739 | new_sfsr |= MMU_SFSR_W; |
| 5740 | new_sfsr |= (asi << MMU_SFSR_ASI_SHIFT); |
| 5741 | if (!mmup->is_immu) { |
| 5742 | mmup->sfar = VA48(addr); |
| 5743 | DBGMMU( lprintf(sp->gid, "%cMMU SFSR update 0x%llx -> 0x%llx SFAR=0x%llx\n", mmup->is_immu ? 'I' : 'D', mmup->sfsr, new_sfsr, mmup->sfar); ); |
| 5744 | } else { |
| 5745 | DBGMMU( lprintf(sp->gid, "%cMMU SFSR update 0x%ll-> 0x%llx x\n", mmup->is_immu ? 'I' : 'D', mmup->sfsr, new_sfsr); ); |
| 5746 | } |
| 5747 | mmup->sfsr = new_sfsr; |
| 5748 | } |
| 5749 | |
| 5750 | /* |
| 5751 | * Below are CPU specific error injection routines. They are called when an |
| 5752 | * error condition is detected clears the error flags if no more errors to |
| 5753 | * post error condition may not be cleared if handling required |
| 5754 | * eg. demap tlb entry with bad parity or flush cacheline with bad ecc |
| 5755 | */ |
| 5756 | #if ERROR_INJECTION |
| 5757 | void extract_error_type(error_conf_t * errorconfp) |
| 5758 | { |
| 5759 | errorconfp->type_namep = strdup(lex.strp); |
| 5760 | |
| 5761 | if (streq(lex.strp,"IRC")) |
| 5762 | errorconfp->type = IRC; |
| 5763 | else if (streq(lex.strp,"IRU")) |
| 5764 | errorconfp->type = IRU; |
| 5765 | else if (streq(lex.strp,"FRC")) |
| 5766 | errorconfp->type = FRC; |
| 5767 | else if (streq(lex.strp,"FRU")) |
| 5768 | errorconfp->type = FRU; |
| 5769 | else if (streq(lex.strp,"IMTU")) |
| 5770 | errorconfp->type = IMTU; |
| 5771 | else if (streq(lex.strp,"IMDU")) |
| 5772 | errorconfp->type = IMDU; |
| 5773 | else if (streq(lex.strp,"DMTU")) |
| 5774 | errorconfp->type = DMTU; |
| 5775 | else if (streq(lex.strp,"DMDU")) |
| 5776 | errorconfp->type = DMDU; |
| 5777 | else if (streq(lex.strp,"DMSU")) |
| 5778 | errorconfp->type = DMSU; |
| 5779 | else if (streq(lex.strp,"ITC")) |
| 5780 | errorconfp->type = ITC; |
| 5781 | else if (streq(lex.strp,"IDC")) |
| 5782 | errorconfp->type = IDC; |
| 5783 | else if (streq(lex.strp,"DTC")) |
| 5784 | errorconfp->type = DTC; |
| 5785 | else if (streq(lex.strp,"DDC")) |
| 5786 | errorconfp->type = DDC; |
| 5787 | else if (streq(lex.strp,"MAU")) |
| 5788 | errorconfp->type = MAU; |
| 5789 | else if (streq(lex.strp,"LDRC")) |
| 5790 | errorconfp->type = LDRC; |
| 5791 | else if (streq(lex.strp,"LDSC")) |
| 5792 | errorconfp->type = LDSC; |
| 5793 | else if (streq(lex.strp,"LTC")) |
| 5794 | errorconfp->type = LTC; |
| 5795 | else if (streq(lex.strp,"LDAC")) |
| 5796 | errorconfp->type = LDAC; |
| 5797 | else if (streq(lex.strp,"LDWC")) |
| 5798 | errorconfp->type = LDWC; |
| 5799 | else if (streq(lex.strp,"LDAU")) |
| 5800 | errorconfp->type = LDAU; |
| 5801 | else if (streq(lex.strp,"LDWU")) |
| 5802 | errorconfp->type = LDWU; |
| 5803 | else if (streq(lex.strp,"DAC")) |
| 5804 | errorconfp->type = DAC; |
| 5805 | else if (streq(lex.strp,"DRC")) |
| 5806 | errorconfp->type = DRC; |
| 5807 | else if (streq(lex.strp,"DSC")) |
| 5808 | errorconfp->type = DSC; |
| 5809 | else if (streq(lex.strp,"DAU")) |
| 5810 | errorconfp->type = DAU; |
| 5811 | else if (streq(lex.strp,"DSU")) |
| 5812 | errorconfp->type = DSU; |
| 5813 | else |
| 5814 | lex_fatal("unknown error type parsing error config"); |
| 5815 | } |
| 5816 | |
| 5817 | void update_errflags(simcpu_t * sp) |
| 5818 | { |
| 5819 | sp->errorp->check_xdcache = find_errconf(sp, (LD|ST), |
| 5820 | (DTC|DDC|IRC|IRU|FRC|FRU|LDAC|LDWC|LDAU|LDWU|DAC|DAU)) ? true : false; |
| 5821 | sp->errorp->check_xicache = (find_errconf(sp, IFETCH, |
| 5822 | (ITC|IDC|LDAC|LDAU|DAC|DAU))) ? true : false; |
| 5823 | sp->errorp->check_dtlb = (find_errconf(sp, (LD|ST), |
| 5824 | (DMDU|DMSU))) ? true : false; |
| 5825 | } |
| 5826 | |
| 5827 | /* |
| 5828 | * If demap of tlb entry with parity error detected then remove error config |
| 5829 | */ |
| 5830 | void tlb_entry_error_match(simcpu_t * sp, ss_mmu_t * mmup, tlb_entry_t * tep) |
| 5831 | { |
| 5832 | error_conf_t * ep; |
| 5833 | |
| 5834 | if (sp->error_enabled) { |
| 5835 | if (sp->errorp->itep == tep && mmup->is_immu) { |
| 5836 | if ((ep = find_errconf(sp, IFETCH, IMDU)) == NULL) |
| 5837 | goto tlb_warning; |
| 5838 | if (remove_errconf(sp, ep) == NULL) |
| 5839 | clear_errflags(sp); else update_errflags(sp); |
| 5840 | sp->errorp->itep = NULL; |
| 5841 | return; |
| 5842 | } else |
| 5843 | if (sp->errorp->dtep == tep && !mmup->is_immu) { |
| 5844 | if ((ep = find_errconf(sp, (LD|ST), DMDU)) == NULL) |
| 5845 | goto tlb_warning; |
| 5846 | if (remove_errconf(sp, ep) == NULL) |
| 5847 | clear_errflags(sp); else update_errflags(sp); |
| 5848 | sp->errorp->dtep = NULL; |
| 5849 | return; |
| 5850 | } |
| 5851 | return; |
| 5852 | |
| 5853 | tlb_warning: EXEC_WARNING(("tlb_entry_error_match(): tracking tlb" |
| 5854 | " entry in error for non-existent error config")); |
| 5855 | } |
| 5856 | } |
| 5857 | |
| 5858 | |
| 5859 | void ss_error_condition(simcpu_t * sp, error_conf_t * ep) |
| 5860 | { |
| 5861 | ss_strand_t * nsp; |
| 5862 | ss_proc_t * npp; |
| 5863 | ss_l2_cache_t * l2p; |
| 5864 | ss_dram_bank_t * dbp; |
| 5865 | simcpu_t * esp; |
| 5866 | sparcv9_cpu_t * v9p; |
| 5867 | sparcv9_trap_type_t tt; |
| 5868 | error_t * errorp; |
| 5869 | uint8_t bank,tid; |
| 5870 | uint_t idx; |
| 5871 | |
| 5872 | v9p = sp->specificp; |
| 5873 | nsp = v9p->impl_specificp; |
| 5874 | npp = sp->config_procp->procp; |
| 5875 | errorp = sp->errorp; |
| 5876 | |
| 5877 | DBGERR( lprintf(sp->gid, "ss_error_condition() etype = %s\n", ep->type_namep); ); |
| 5878 | |
| 5879 | switch (ep->type) { |
| 5880 | case IRC: |
| 5881 | nsp->error.status = NA_IRC_bit; |
| 5882 | nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp) |
| 5883 | | I_SYND(IREG_FAKE_SYND_SINGLE)); |
| 5884 | if (nsp->error.enabled & NA_CEEN) { |
| 5885 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 5886 | v9p->post_precise_trap(sp, tt); |
| 5887 | } |
| 5888 | if (remove_errconf(sp, ep) == NULL) |
| 5889 | clear_errflags(sp); else update_errflags(sp); |
| 5890 | break; |
| 5891 | case IRU: |
| 5892 | nsp->error.status = NA_IRU_bit; |
| 5893 | nsp->error.addr = (I_REG_NUM(errorp->reg) | I_REG_WIN(v9p->cwp) |
| 5894 | | I_SYND(IREG_FAKE_SYND_DOUBLE)); |
| 5895 | if (nsp->error.enabled & NA_NCEEN) { |
| 5896 | tt = Sparcv9_trap_internal_processor_error; |
| 5897 | v9p->post_precise_trap(sp, tt); |
| 5898 | } |
| 5899 | if (remove_errconf(sp, ep) == NULL) |
| 5900 | clear_errflags(sp); else update_errflags(sp); |
| 5901 | break; |
| 5902 | case FRC: |
| 5903 | nsp->error.status = NA_FRC_bit; |
| 5904 | nsp->error.addr = (F_REG_NUM(errorp->reg) | |
| 5905 | EVEN_SYND(FREG_FAKE_SYND_SINGLE) | ODD_SYND(NULL)); |
| 5906 | if (nsp->error.enabled & NA_CEEN) { |
| 5907 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 5908 | v9p->post_precise_trap(sp, tt); |
| 5909 | } |
| 5910 | if (remove_errconf(sp, ep) == NULL) |
| 5911 | clear_errflags(sp); else update_errflags(sp); |
| 5912 | break; |
| 5913 | case FRU: |
| 5914 | nsp->error.status = NA_FRU_bit; |
| 5915 | nsp->error.addr = (F_REG_NUM(errorp->reg) | |
| 5916 | EVEN_SYND(FREG_FAKE_SYND_DOUBLE) | ODD_SYND(NULL)); |
| 5917 | if (nsp->error.enabled & NA_NCEEN) { |
| 5918 | tt = Sparcv9_trap_internal_processor_error; |
| 5919 | v9p->post_precise_trap(sp, tt); |
| 5920 | } |
| 5921 | if (remove_errconf(sp, ep) == NULL) |
| 5922 | clear_errflags(sp); else update_errflags(sp); |
| 5923 | break; |
| 5924 | case IMTU: |
| 5925 | nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit); |
| 5926 | nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMTU_IDX]); |
| 5927 | errorp->tlb_idx[IMTU_IDX] = NULL; |
| 5928 | if (nsp->error.enabled & NA_NCEEN) { |
| 5929 | tt = Sparcv9_trap_data_access_error; |
| 5930 | v9p->post_precise_trap(sp, tt); |
| 5931 | } |
| 5932 | if (remove_errconf(sp, ep) == NULL) |
| 5933 | clear_errflags(sp); else update_errflags(sp); |
| 5934 | break; |
| 5935 | case IMDU: |
| 5936 | if (ep->op == ASI_LD) { |
| 5937 | nsp->error.status = (NA_PRIV_bit|NA_IMDU_bit); |
| 5938 | nsp->error.addr = TLB_INDEX(errorp->tlb_idx[IMDU_IDX]); |
| 5939 | errorp->tlb_idx[IMDU_IDX] = NULL; |
| 5940 | if (nsp->error.enabled & NA_NCEEN) { |
| 5941 | tt = Sparcv9_trap_data_access_error; |
| 5942 | v9p->post_precise_trap(sp, tt); |
| 5943 | } |
| 5944 | if (remove_errconf(sp, ep) == NULL) |
| 5945 | clear_errflags(sp); else update_errflags(sp); |
| 5946 | } else { |
| 5947 | nsp->error.status = NA_IMDU_bit; |
| 5948 | nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 5949 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 5950 | nsp->error.addr = MMU_PC(sp->pc); |
| 5951 | if (nsp->error.enabled & NA_NCEEN) { |
| 5952 | tt = Sparcv9_trap_instruction_access_error; |
| 5953 | v9p->post_precise_trap(sp, tt); |
| 5954 | } |
| 5955 | } |
| 5956 | break; |
| 5957 | case DMTU: |
| 5958 | nsp->error.status = (NA_PRIV_bit|NA_IMTU_bit); |
| 5959 | nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMTU_IDX]); |
| 5960 | errorp->tlb_idx[DMTU_IDX] = NULL; |
| 5961 | if (nsp->error.enabled & NA_NCEEN) { |
| 5962 | tt = Sparcv9_trap_data_access_error; |
| 5963 | v9p->post_precise_trap(sp, tt); |
| 5964 | } |
| 5965 | if (remove_errconf(sp, ep) == NULL) |
| 5966 | clear_errflags(sp); else update_errflags(sp); |
| 5967 | break; |
| 5968 | case DMDU: |
| 5969 | if (ep->op == ASI_LD) { |
| 5970 | nsp->error.status = (NA_PRIV_bit|NA_DMDU_bit); |
| 5971 | nsp->error.addr = TLB_INDEX(errorp->tlb_idx[DMDU_IDX]); |
| 5972 | errorp->tlb_idx[DMDU_IDX] = NULL; |
| 5973 | if (nsp->error.enabled & NA_NCEEN) { |
| 5974 | tt = Sparcv9_trap_data_access_error; |
| 5975 | v9p->post_precise_trap(sp, tt); |
| 5976 | } |
| 5977 | if (remove_errconf(sp, ep) == NULL) |
| 5978 | clear_errflags(sp); else update_errflags(sp); |
| 5979 | } else { |
| 5980 | nsp->error.status = NA_DMDU_bit; |
| 5981 | nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 5982 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 5983 | nsp->error.addr = MMU_VA(errorp->addr); |
| 5984 | if (nsp->error.enabled & NA_NCEEN) { |
| 5985 | tt = Sparcv9_trap_data_access_error; |
| 5986 | v9p->post_precise_trap(sp, tt); |
| 5987 | } |
| 5988 | } |
| 5989 | break; |
| 5990 | case DMSU: |
| 5991 | nsp->error.status = NA_DMSU_bit; |
| 5992 | nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 5993 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 5994 | nsp->error.addr = MMU_VA(errorp->addr); |
| 5995 | if (nsp->error.enabled & NA_NCEEN) { |
| 5996 | tt = Sparcv9_trap_data_access_error; |
| 5997 | v9p->post_precise_trap(sp, tt); |
| 5998 | } |
| 5999 | break; |
| 6000 | case ITC: |
| 6001 | nsp->error.status = NA_ITC_bit; |
| 6002 | goto icache_error; |
| 6003 | case IDC: |
| 6004 | nsp->error.status = NA_IDC_bit; |
| 6005 | icache_error: nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 6006 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 6007 | nsp->error.addr = L1_PA(errorp->addr); |
| 6008 | if (nsp->error.enabled & NA_CEEN) { |
| 6009 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 6010 | v9p->post_precise_trap(sp, tt); |
| 6011 | } |
| 6012 | if (remove_errconf(sp, ep) == NULL) |
| 6013 | clear_errflags(sp); else update_errflags(sp); |
| 6014 | break; |
| 6015 | case DTC: |
| 6016 | nsp->error.status = NA_DTC_bit; |
| 6017 | goto dcache_error; |
| 6018 | case DDC: |
| 6019 | nsp->error.status = NA_DDC_bit; |
| 6020 | dcache_error: nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 6021 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 6022 | nsp->error.addr = L1_PA(errorp->addr); |
| 6023 | if (nsp->error.enabled & NA_CEEN) { |
| 6024 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 6025 | v9p->post_precise_trap(sp, tt); |
| 6026 | } |
| 6027 | if (remove_errconf(sp, ep) == NULL) |
| 6028 | clear_errflags(sp); else update_errflags(sp); |
| 6029 | break; |
| 6030 | case MAU: |
| 6031 | if (remove_errconf(sp, ep) == NULL) clear_errflags(sp); |
| 6032 | IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep)); |
| 6033 | break; |
| 6034 | case LDAC: |
| 6035 | bank = (errorp->addr >> 6) & 0x3; |
| 6036 | l2p = npp->l2p; |
| 6037 | tid = nsp->vcore_id; |
| 6038 | l2p->error_status[bank] = L2_LDAC_bit | L2_TID(tid) | L2_VEC_bit | |
| 6039 | L2_FAKE_SYND_SINGLE | errorp->l2_write; |
| 6040 | l2p->error_address[bank] = L2_PA_LINE(errorp->addr); |
| 6041 | if ((nsp->error.enabled & NA_CEEN) && |
| 6042 | (l2p->error_enable[bank] & L2_CEEN)) { |
| 6043 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 6044 | v9p->post_precise_trap(sp, tt); |
| 6045 | } |
| 6046 | /* l2 corrected on partial store or atomic hit */ |
| 6047 | if (errorp->l2_write) { |
| 6048 | npp->errorp->ldac_addr = NULL; |
| 6049 | ss_set_errcheck(npp); |
| 6050 | } else { |
| 6051 | uint_t idx; |
| 6052 | |
| 6053 | /* l2 uncorrected on load/ifetch hit so make error proc-wide */ |
| 6054 | npp->error_check = true; |
| 6055 | npp->errorp->ldac_addr = errorp->addr; |
| 6056 | /* |
| 6057 | * NB: proper behavior is to flush all cpu xdcache's |
| 6058 | * but there is no lock on the xdc so I didn't try it |
| 6059 | */ |
| 6060 | sp->xdcache_trans_flush_pending = true; |
| 6061 | } |
| 6062 | |
| 6063 | /* bit of a hack - some errorconf's aren't owned by sp's so free them */ |
| 6064 | if (ep->npp) free(ep); |
| 6065 | else { |
| 6066 | if (remove_errconf(sp, ep) == NULL) |
| 6067 | clear_errflags(sp); else update_errflags(sp); |
| 6068 | } |
| 6069 | break; |
| 6070 | case LDWC: |
| 6071 | bank = (errorp->addr >> 6) & 0x3; |
| 6072 | l2p = npp->l2p; |
| 6073 | tid = nsp->vcore_id; |
| 6074 | l2p->error_status[bank] = L2_LDWC_bit | L2_TID(tid) | L2_VEC_bit | |
| 6075 | L2_FAKE_SYND_SINGLE | L2_RW_bit; |
| 6076 | l2p->error_address[bank] = L2_PA_LINE(errorp->addr); |
| 6077 | tid = (l2p->control[bank] & L2_ERRORSTEER); |
| 6078 | v9p = npp->strand[STRANDID2IDX(npp, tid)]; |
| 6079 | nsp = v9p->impl_specificp; |
| 6080 | esp = v9p->simp; |
| 6081 | if ((nsp->error.enabled & NA_CEEN) && |
| 6082 | (l2p->error_enable[bank] & L2_CEEN)) { |
| 6083 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 6084 | v9p->post_precise_trap(esp, tt); |
| 6085 | } |
| 6086 | if (remove_errconf(sp, ep) == NULL) |
| 6087 | clear_errflags(sp); else update_errflags(sp); |
| 6088 | break; |
| 6089 | case LDRC: |
| 6090 | case LDSC: |
| 6091 | if (remove_errconf(sp, ep) == NULL) clear_errflags(sp); |
| 6092 | IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep)); |
| 6093 | break; |
| 6094 | case LDAU: |
| 6095 | bank = (errorp->addr >> 6) & 0x3; |
| 6096 | l2p = npp->l2p; |
| 6097 | tid = nsp->vcore_id; |
| 6098 | l2p->error_status[bank] = L2_LDAU_bit | L2_TID(tid) | L2_VEU_bit | |
| 6099 | L2_FAKE_SYND_DOUBLE | errorp->l2_write; |
| 6100 | l2p->error_address[bank] = L2_PA_LINE(errorp->addr); |
| 6101 | if (l2p->error_enable[bank] & L2_NCEEN) { |
| 6102 | nsp->error.status = NA_LDAU_bit; |
| 6103 | nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 6104 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 6105 | nsp->error.addr = L1_PA(errorp->addr); |
| 6106 | if (nsp->error.enabled & NA_NCEEN) { |
| 6107 | tt = (ep->type == IFETCH) |
| 6108 | ? Sparcv9_trap_instruction_access_error |
| 6109 | : Sparcv9_trap_data_access_error; |
| 6110 | v9p->post_precise_trap(sp, tt); |
| 6111 | } |
| 6112 | } |
| 6113 | /* |
| 6114 | * store error info to cacheline for error handler diag access |
| 6115 | * and to support direct-mapped mode displacement flushing |
| 6116 | */ |
| 6117 | /* index stores to a 32bit word and its ECC+rsvd bits */ |
| 6118 | idx = errorp->addr & (L2_WAY | L2_LINE | L2_BANK | L2_WORD) >> 2; |
| 6119 | /* put oddeven select bit low so data is in addr order */ |
| 6120 | idx |= ((errorp->addr >> L2_ODDEVEN_SHIFT) & 1); |
| 6121 | l2p->diag_datap[idx] = ((0xabbadada << 7) | L2_FAKE_SYND_DOUBLE); |
| 6122 | |
| 6123 | /* index stores to a tag and its ECC+rsvd bits */ |
| 6124 | idx = errorp->addr & (L2_WAY | L2_LINE | L2_BANK) >> 6; |
| 6125 | l2p->diag_tagp[idx] = (errorp->addr & L2_TAG) >> 12; |
| 6126 | |
| 6127 | /* index valid/dirty or alloc/used bits and parity */ |
| 6128 | idx = errorp->addr & (L2_LINE | L2_BANK) >> 6; |
| 6129 | idx |= ((errorp->addr & L2_VDSEL) >> 10); |
| 6130 | l2p->diag_vuadp[idx] = 0xfff << 12; /* all lines valid/clean */ |
| 6131 | |
| 6132 | /* uncorrectible error in l2 so make it proc-wide */ |
| 6133 | npp->error_check = true; |
| 6134 | npp->errorp->ldau_addr = errorp->addr; |
| 6135 | sp->xdcache_trans_flush_pending = true; |
| 6136 | |
| 6137 | /* bit of a hack - some errorconf's aren't owned by sp's so free them */ |
| 6138 | if (ep->npp) free(ep); |
| 6139 | else { |
| 6140 | if (remove_errconf(sp, ep) == NULL) |
| 6141 | clear_errflags(sp); else update_errflags(sp); |
| 6142 | } |
| 6143 | break; |
| 6144 | case LDWU: |
| 6145 | bank = (errorp->addr >> 6) & 0x3; |
| 6146 | l2p = npp->l2p; |
| 6147 | tid = nsp->vcore_id; |
| 6148 | l2p->error_status[bank] = L2_LDWU_bit | L2_TID(tid) | L2_VEU_bit | |
| 6149 | L2_FAKE_SYND_DOUBLE | L2_RW_bit; |
| 6150 | l2p->error_address[bank] = L2_PA_LINE(errorp->addr); |
| 6151 | if ((nsp->error.enabled & NA_NCEEN) && |
| 6152 | (l2p->error_enable[bank] & L2_NCEEN)) { |
| 6153 | tid = (l2p->control[bank] & L2_ERRORSTEER); |
| 6154 | v9p = npp->strand[STRANDID2IDX(npp, tid)]; |
| 6155 | esp = v9p->simp; |
| 6156 | tt = (sparcv9_trap_type_t)N1_trap_data_error; |
| 6157 | v9p->post_precise_trap(esp, tt); |
| 6158 | } |
| 6159 | npp->error_check = true; |
| 6160 | npp->errorp->ldau_addr = errorp->addr; |
| 6161 | |
| 6162 | /* bit of a hack - some errorconf's aren't owned by sp's so free them */ |
| 6163 | if (ep->npp) free(ep); |
| 6164 | else { |
| 6165 | if (remove_errconf(sp, ep) == NULL) |
| 6166 | clear_errflags(sp); else update_errflags(sp); |
| 6167 | } |
| 6168 | break; |
| 6169 | case LDRU: |
| 6170 | case LDSU: |
| 6171 | case LTC: |
| 6172 | case LVU: |
| 6173 | case LRU: |
| 6174 | if (remove_errconf(sp, ep) == NULL) clear_errflags(sp); |
| 6175 | IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep)); |
| 6176 | break; |
| 6177 | case DAC: |
| 6178 | l2p = npp->l2p; |
| 6179 | bank = (errorp->addr >> 6) & 0x3; |
| 6180 | dbp = &(npp->mbankp[bank]); |
| 6181 | dbp->error_status = DRAM_DAC_bit | DRAM_FAKE_SYND_SINGLE; |
| 6182 | |
| 6183 | /* if store miss and L2 disabled then only set DRAM error status */ |
| 6184 | if (ep->op == ST && !errorp->partial_st) { |
| 6185 | for (bank=0; bank<npp->num_l2banks; bank++) { |
| 6186 | if (l2p->control[bank] & L2_DIS) |
| 6187 | break; |
| 6188 | } |
| 6189 | } |
| 6190 | |
| 6191 | bank = (errorp->addr >> 6) & 0x3; |
| 6192 | tid = nsp->vcore_id; |
| 6193 | l2p->error_status[bank] = L2_DAC_bit | L2_TID(tid) | L2_VEC_bit | |
| 6194 | errorp->l2_write; |
| 6195 | l2p->error_address[bank] = L2_PA_LINE(errorp->addr); |
| 6196 | if ((nsp->error.enabled & NA_CEEN) && |
| 6197 | (l2p->error_enable[bank] & L2_CEEN)) { |
| 6198 | /* |
| 6199 | * partial stores and odd-numbered cache lines |
| 6200 | * redirected to errorsteer thread |
| 6201 | */ |
| 6202 | if (errorp->partial_st || (errorp->addr & 0x40)) { |
| 6203 | tid = (l2p->control[bank] & L2_ERRORSTEER); |
| 6204 | v9p = npp->strand[STRANDID2IDX(npp, tid)]; |
| 6205 | esp = v9p->simp; |
| 6206 | l2p->error_status[bank] &= ~(errorp->l2_write); |
| 6207 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 6208 | v9p->post_precise_trap(esp, tt); |
| 6209 | } else { |
| 6210 | tt = (sparcv9_trap_type_t)SS_trap_ECC_error; |
| 6211 | v9p->post_precise_trap(sp, tt); |
| 6212 | } |
| 6213 | } |
| 6214 | if (remove_errconf(sp, ep) == NULL) |
| 6215 | clear_errflags(sp); else update_errflags(sp); |
| 6216 | break; |
| 6217 | case DSC: |
| 6218 | case DAU: |
| 6219 | l2p = npp->l2p; |
| 6220 | bank = (errorp->addr >> 6) & 0x3; |
| 6221 | dbp = &(npp->mbankp[bank]); |
| 6222 | dbp->error_status = DRAM_DAU_bit | DRAM_FAKE_SYND_DOUBLE; |
| 6223 | tid = nsp->vcore_id; |
| 6224 | l2p->error_status[bank] = L2_DAU_bit | L2_TID(tid) | L2_VEU_bit | |
| 6225 | errorp->l2_write; |
| 6226 | l2p->error_address[bank] = L2_PA_LINE(errorp->addr); |
| 6227 | if (l2p->error_enable[bank] & L2_NCEEN) { |
| 6228 | nsp->error.status = NA_LDAU_bit; /* as per Table 12-4 of PRM */ |
| 6229 | nsp->error.status |= (ep->priv == V9_HyperPriv || |
| 6230 | ep->priv == V9_Priv) ? NA_PRIV_bit : 0; |
| 6231 | /* |
| 6232 | * partial stores and odd-numbered cache lines |
| 6233 | * redirected to errorsteer thread |
| 6234 | */ |
| 6235 | if (errorp->partial_st || (errorp->addr & 0x40)) { |
| 6236 | tid = (l2p->control[bank] & L2_ERRORSTEER); |
| 6237 | v9p = npp->strand[STRANDID2IDX(npp, tid)]; |
| 6238 | esp = v9p->simp; |
| 6239 | l2p->error_status[bank] &= ~(errorp->l2_write); |
| 6240 | /* |
| 6241 | * set address to non-requested 16B block |
| 6242 | * within the same 64B cache line |
| 6243 | */ |
| 6244 | if (!errorp->partial_st) |
| 6245 | errorp->addr = (errorp->addr & ~0x30) | |
| 6246 | (((errorp->addr & 0x30) + 0x10) % 0x40); |
| 6247 | nsp->error.addr = L1_PA(errorp->addr); |
| 6248 | tt = (sparcv9_trap_type_t)N1_trap_data_error; |
| 6249 | v9p->post_precise_trap(esp, tt); |
| 6250 | break; |
| 6251 | } |
| 6252 | nsp->error.addr = L1_PA(errorp->addr); |
| 6253 | if (nsp->error.enabled & NA_NCEEN) { |
| 6254 | tt = (ep->type == IFETCH) |
| 6255 | ? Sparcv9_trap_instruction_access_error |
| 6256 | : Sparcv9_trap_data_access_error; |
| 6257 | v9p->post_precise_trap(sp, tt); |
| 6258 | } |
| 6259 | } |
| 6260 | if (remove_errconf(sp, ep) == NULL) |
| 6261 | clear_errflags(sp); else update_errflags(sp); |
| 6262 | break; |
| 6263 | case DSU: |
| 6264 | case DBU9: |
| 6265 | case DRAM: |
| 6266 | if (remove_errconf(sp, ep) == NULL) clear_errflags(sp); |
| 6267 | IMPL_WARNING(("Unimplemented Error Type: %s\n", ep->type_namep)); |
| 6268 | break; |
| 6269 | |
| 6270 | default: |
| 6271 | if (remove_errconf(sp, ep) == NULL) clear_errflags(sp); |
| 6272 | EXEC_WARNING(("Unspecified Error Type: %s\n", ep->type_namep)); |
| 6273 | break; |
| 6274 | } |
| 6275 | } |
| 6276 | #endif |
| 6277 | |