/*
 * ========== Copyright Header Begin ==========================================
 *
 * OpenSPARC T2 Processor File: niagara2.c
 * Copyright (c) 2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
 *
 * The above named program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * The above named program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this work; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * ========== Copyright Header End ============================================
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)niagara2.c	1.79	07/10/12 SMI"
#include <string.h> /* memcpy/memset */
#ifdef FP_DECODE_DISABLED
#define FP_DECODE_FPU_ON_CHECK \
if (!((sparcv9_cpu_t*)(sp->specificp))->fpu_on) goto n2_fp_disabled
#else /* FP_DECODE_DISABLED */
#define FP_DECODE_FPU_ON_CHECK
#endif /* FP_DECODE_DISABLED */
static void niagara2_init_trap_list();
static bool_t niagara2_init_proc_type(proc_type_t *proc_typep);
static op_funcp niagara2_decode_me(simcpu_t *sp, xicache_instn_t *xcip, uint32_t instn);
static void niagara2_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp);
void niagara2_send_xirq(simcpu_t *sp, ss_proc_t *tnpp, uint64_t val);
static uint64_t niagara2_ext_signal(config_proc_t *config_procp, ext_sig_t sigtype, void *vp);
static bool_t ss_error_asi_noop_access(simcpu_t *, maccess_t, uint_t, uint_t, bool_t, tvaddr_t);
static void niagara2_domain_check(domain_t *domainp);
static void niagara2_init_trap_list()
{
    uint_t i;

    /*
     * The SunSPARC traps are named with the prefix 'SS_', the N2 traps are
     * prefixed with 'N2_'. The assignment of TT, priorities (both absolute
     * and relative), and trap delivery mode are based on the N2 PRM,
     * Rev. 1.0, 8/9/2005.
     */
    static ss_trap_list_t setup_list[] = {

    /* Priorities 0 = highest, XX = Lowest */
    /*          Number  Name                                    Priority    User  Priv  HPriv */
    /* 0x00 */ { T( legion_save_state ),                        Pri( 0, 0), H,  H,  H },
    /* 0x01 */ { T( power_on_reset ),                           Pri( 0, 0), H,  H,  H },
    /* 0x02 */ { T( watchdog_reset ),                           Pri( 1, 2), H,  H,  H },
    /* 0x03 */ { T( externally_initiated_reset ),               Pri( 1, 1), H,  H,  H },
    /* 0x04 */ { T( software_initiated_reset ),                 Pri( 1, 3), H,  H,  H },
    /* 0x05 */ { T( RED_state_exception ),                      Pri( 1, 4), H,  H,  H },
    /* 0x07 */ { TN2( store_error ),                            Pri( 2, 1), H,  H,  H },
    /* 0x08 */ { T( IAE_privilege_violation ),                  Pri( 3, 1), H,  X,  X },
    /* 0x09 */ { T( instruction_access_MMU_miss ),              Pri( 2, 8), H,  H,  X },
    /* 0x0a */ { T( instruction_access_error ),                 Pri( 4, 0), H,  H,  H },
    /* 0x0b */ { T( IAE_unauth_access ),                        Pri( 2, 9), H,  H,  X },
    /* 0x0c */ { T( IAE_NFO_page ),                             Pri( 3, 3), H,  H,  X },
    /* 0x0d */ { TN2( instruction_address_range ),              Pri( 2, 6), H,  H,  UH },
    /* 0x0d */ { TN2( instruction_real_range ),                 Pri( 2, 6), H,  H,  UH },
    /* 0x10 */ { T( illegal_instruction ),                      Pri( 6, 1), H,  H,  H },
    /* 0x11 */ { T( privileged_opcode ),                        Pri( 7, 0), P,  X,  X },
               /* LDD and STD are in fact implemented by niagara */
    /* 0x12 */ { T( unimplemented_LDD ),                        Pri( 6, 0), X,  X,  X },  /* error if received by hypervisor. */
    /* 0x13 */ { T( unimplemented_STD ),                        Pri( 6, 0), X,  X,  X },  /* error if received by hypervisor. */
    /* 0x14 */ { T( DAE_invalid_ASI ),                          Pri(12, 1), H,  H,  UH },
    /* 0x15 */ { T( DAE_privilege_violation ),                  Pri(12, 4), H,  H,  UH },
    /* 0x16 */ { T( DAE_nc_page ),                              Pri(12, 5), H,  H,  UH },
    /* 0x17 */ { T( DAE_NFO_page ),                             Pri(12, 6), H,  H,  UH },
    /* 0x20 */ { T( fp_disabled ),                              Pri( 8, 1), P,  P,  UH },  /* error if received by hypervisor. */
    /* 0x21 */ { T( fp_exception_ieee_754 ),                    Pri(11, 1), P,  P,  UH },  /* error if received by hypervisor. */
    /* 0x22 */ { T( fp_exception_other ),                       Pri(11, 1), P,  P,  UH },  /* error if received by hypervisor. */
    /* 0x23 */ { T( tag_overflow ),                             Pri(14, 0), P,  P,  UH },  /* error if received by hypervisor. */
    /* 0x24 */ { T( clean_window ),                             Pri(10, 1), P,  P,  UH },  /* error if received by hypervisor - windows not used. */
    /* 0x28 */ { T( division_by_zero ),                         Pri(15, 0), P,  P,  UH },  /* error if received by hypervisor. */
    /* 0x29 */ { T( internal_processor_error ),                 Pri( 8, 2), H,  H,  H },   /* generated by register parity errors */
    /* 0x2a */ { T( instruction_invalid_TSB_entry ),            Pri( 2, 10), H, H,  X },
    /* 0x2b */ { T( data_invalid_TSB_entry ),                   Pri(12, 3), H,  H,  H },
    /* 0x2d */ { TN2( mem_real_range ),                         Pri(11, 3), H,  H,  UH },
    /* 0x2e */ { TN2( mem_address_range ),                      Pri(11, 3), H,  H,  UH },
    /* 0x30 */ { T( DAE_so_page ),                              Pri(12, 6), H,  H,  UH },
    /* 0x31 */ { T( data_access_MMU_miss ),                     Pri(12, 3), H,  H,  H },
    /* 0x32 */ { T( data_access_error ),                        Pri(12, 9), H,  H,  H },   /* handle error and generate report to appropriate supervisor. */
    /* 0x34 */ { T( mem_address_not_aligned ),                  Pri(10, 2), H,  H,  UH },  /* error if received by hypervisor. */
    /* 0x35 */ { T( LDDF_mem_address_not_aligned ),             Pri(10, 1), H,  H,  UH },  /* error if received by hypervisor. */
    /* 0x36 */ { T( STDF_mem_address_not_aligned ),             Pri(10, 1), H,  H,  UH },  /* error if received by hypervisor. */
    /* 0x37 */ { T( privileged_action ),                        Pri(11, 1), H,  H,  X },   /* error if received from hypervisor. */
    /* 0x38 */ { T( LDQF_mem_address_not_aligned ),             Pri(10, 1), H,  H,  UH },  /* error if received by hypervisor. */
    /* 0x39 */ { T( STQF_mem_address_not_aligned ),             Pri(10, 1), H,  H,  UH },  /* error if received by hypervisor. */
    /* 0x3b */ { TN2( unsupported_page_size ),                  Pri(13, 0), H,  H,  UH },
    /* 0x3c */ { TN2( control_word_queue_interrupt ),           Pri(16, 5), H,  H,  H },
    /* 0x3d */ { TN2( modular_arithmetic_interrupt ),           Pri(16, 4), H,  H,  H },
    /* 0x3e */ { T( instruction_real_translation_miss ),        Pri( 2, 8), H,  H,  X },   /* real to pa entry not found in ITLB */
    /* 0x3f */ { T( data_real_translation_miss ),               Pri(12, 3), H,  H,  H },   /* real to pa entry not found in DTLB */
               /* this one ever generated ? */
    /* 0x40 */ { T( sw_recoverable_error ),                     Pri(33, 1), H,  H,  H },
    /* 0x41 */ { T( interrupt_level_1 ),                        Pri(31, 0), P,  P,  X },
    /* 0x42 */ { T( interrupt_level_2 ),                        Pri(30, 0), P,  P,  X },
    /* 0x43 */ { T( interrupt_level_3 ),                        Pri(29, 0), P,  P,  X },
    /* 0x44 */ { T( interrupt_level_4 ),                        Pri(28, 0), P,  P,  X },
    /* 0x45 */ { T( interrupt_level_5 ),                        Pri(27, 0), P,  P,  X },
    /* 0x46 */ { T( interrupt_level_6 ),                        Pri(26, 0), P,  P,  X },
    /* 0x47 */ { T( interrupt_level_7 ),                        Pri(25, 0), P,  P,  X },
    /* 0x48 */ { T( interrupt_level_8 ),                        Pri(24, 0), P,  P,  X },
    /* 0x49 */ { T( interrupt_level_9 ),                        Pri(23, 0), P,  P,  X },
    /* 0x4a */ { T( interrupt_level_a ),                        Pri(22, 0), P,  P,  X },
    /* 0x4b */ { T( interrupt_level_b ),                        Pri(21, 0), P,  P,  X },
    /* 0x4c */ { T( interrupt_level_c ),                        Pri(20, 0), P,  P,  X },
    /* 0x4d */ { T( interrupt_level_d ),                        Pri(19, 0), P,  P,  X },
    /* 0x4e */ { T( interrupt_level_e ),                        Pri(18, 0), P,  P,  X },
    /* 0x4f */ { T( interrupt_level_f ),                        Pri(17, 0), P,  P,  X },
    /* 0x5e */ { T( hstick_match ),                             Pri(16, 1), H,  H,  H },
    /* 0x5f */ { T( trap_level_zero ),                          Pri( 2, 2), H,  H,  X },   /* This trap requires TL==0, priv==1 and hpriv==0 */
    /* 0x60 */ { T( interrupt_vector_trap ),                    Pri(16, 3), H,  H,  H },   /* handle & remap to sun4v as appropriate mondo queue */
    /* 0x61 */ { T( RA_watchpoint ),                            Pri(12, 8), H,  H,  H },
    /* 0x62 */ { T( VA_watchpoint ),                            Pri(11, 2), P,  P,  X },   /* error - VA watchpoints should be pended if hpriv=1 */
    /* 0x63 */ { T( hw_corrected_error ),                       Pri(33, 2), H,  H,  H },
    /* 0x64 */ { T( fast_instruction_access_MMU_miss ),         Pri( 2, 8), H,  H,  X },
    /* 0x68 */ { T( fast_data_access_MMU_miss ),                Pri(12, 3), H,  H,  H },
    /* 0x6c */ { T( fast_data_access_protection ),              Pri(12, 7), H,  H,  H },
    /* 0x71 */ { T( instruction_access_MMU_error ),             Pri( 2, 7), H,  H,  X },
    /* 0x72 */ { T( data_access_MMU_error ),                    Pri(12, 2), H,  H,  H },
    /* 0x74 */ { T( control_transfer_instruction ),             Pri(11, 1), P,  P,  H },
    /* 0x75 */ { T( instruction_VA_watchpoint ),                Pri( 2, 5), P,  P,  X },
    /* 0x76 */ { T( instruction_breakpoint ),                   Pri( 6, 2), H,  H,  H },
    /* 0x7c */ { T( cpu_mondo_trap ),                           Pri(16, 6), P,  P,  X },
    /* 0x7d */ { T( dev_mondo_trap ),                           Pri(16, 7), P,  P,  X },
    /* 0x7e */ { T( resumable_error ),                          Pri(33, 3), P,  P,  X },
               /* faked by the hypervisor */
    /* 0x7f */ { T( nonresumable_error ),                       Pri( 4, 0), SW, SW, SW },
    /* 0x80 */ { T( spill_0_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x84 */ { T( spill_1_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x88 */ { T( spill_2_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x8c */ { T( spill_3_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x90 */ { T( spill_4_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x94 */ { T( spill_5_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x98 */ { T( spill_6_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0x9c */ { T( spill_7_normal ),                           Pri( 9, 0), P,  P,  UH },
    /* 0xa0 */ { T( spill_0_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xa4 */ { T( spill_1_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xa8 */ { T( spill_2_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xac */ { T( spill_3_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xb0 */ { T( spill_4_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xb4 */ { T( spill_5_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xb8 */ { T( spill_6_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xbc */ { T( spill_7_other ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xc0 */ { T( fill_0_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xc4 */ { T( fill_1_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xc8 */ { T( fill_2_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xcc */ { T( fill_3_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xd0 */ { T( fill_4_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xd4 */ { T( fill_5_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xd8 */ { T( fill_6_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xdc */ { T( fill_7_normal ),                            Pri( 9, 0), P,  P,  UH },
    /* 0xe0 */ { T( fill_0_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xe4 */ { T( fill_1_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xe8 */ { T( fill_2_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xec */ { T( fill_3_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xf0 */ { T( fill_4_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xf4 */ { T( fill_5_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xf8 */ { T( fill_6_other ),                             Pri( 9, 0), P,  P,  UH },
    /* 0xfc */ { T( fill_7_other ),                             Pri( 9, 0), P,  P,  UH },
    /*0x100-0x17f*/{ T( trap_instruction ),                     Pri(16, 2), P,  P,  H },   /* hv1: handles hypervisor traps only. Error if received from hypervisor. */
    /*0x180-0x1ff*/{ T( htrap_instruction ),                    Pri(16, 2), X,  H,  UH },  /* used to implement the supervisor to hypervisor API call. */

    /* assumed terminator: the init loops below stop at trap_type == -1 */
    { -1, Pri( 0, 0), X, X, X }
    };

    for (i = 0; setup_list[i].trap_type != -1; i++) {
        ASSERT(setup_list[i].trap_type >= SS_trap_legion_save_state &&
            setup_list[i].trap_type < SS_trap_illegal_value);
        ss_trap_list[setup_list[i].trap_type] = setup_list[i];
    }

    /* Now clone the trap instruction entries */
    for (i = 0x101; i < 0x180; i++) {
        ss_trap_list[i] = ss_trap_list[0x100];
        ss_trap_list[i].trap_type = i;
    }
    for (i = 0x181; i < 0x200; i++) {
        ss_trap_list[i] = ss_trap_list[0x180];
        ss_trap_list[i].trap_type = i;
    }
}
#define PROCESSOR_NAME "vfalls"
#define PROCESSOR_TYPE proc_type_vfalls
#define PROCESSOR_NAME "niagara2"
#define PROCESSOR_TYPE proc_type_niagara2
extern struct fpsim_functions fpsim_funclist;

proc_type_t PROCESSOR_TYPE = {
    false,              /* module initialised */

    /* execution support functions */

#if ERROR_TRAP_GEN /* { */

    /* pointer to fpsim instructions */

    /* performance measuring funcs */

    /* dump tlb, instruction counts etc */
    ss_dump_instruction_counts,

    /* external interface methods */

    /* debugger interface methods */
    NULL,               /* debug_hook_dumpp */
/*
 * Returns false if error initialising module, true if init was OK
 */
bool_t niagara2_init_proc_type(proc_type_t *proctp)
{
    if (proctp->flag_initialised) {
        warning("Initialisation of module %s more than once - bailing",
            proctp->proc_type_namep);
        return true;    /* assumed: an already-initialised module is usable as-is */
    }

    /* stuff here we only need to do once if we want to use this module */
    niagara2_init_trap_list();
    proctp->flag_initialised = true;
    niagara2_init_error_list();

    return true;
}
/*
 * This function fills the PA<39:0> field of the TSB pointer registers with the
 * current data stored in the Tag access register and appropriate TSB config
 * register. See 13.11.12 of N2 PRM, rev 0.6, for more details.
 */
static uint64_t ss_make_tsb_pointer(tvaddr_t va, ss_tsb_info_t *tsb_config_regp)
{
    uint_t ps, n, shift, tte_idx;
    uint64_t vpn, tte_addr;     /* assumed 64bit temporaries */

    n = tsb_config_regp->tsb_size;
    ps = tsb_config_regp->page_size;
    shift = SUN4V_PAGE_OFFSET(ps);
    vpn = va & SUN4V_VPN_MASK(ps);
    tte_idx = (vpn & SUN4V_TTE_IDX_MASK(n, ps)) >> shift;
    tte_addr = tsb_config_regp->tsb_base | (tte_idx << 4);

    return tte_addr;    /* assumed return of the computed pointer */
}
uint8_t * ss_make_tsb_pointer_int(tvaddr_t va, ss_tsb_info_t *tsb_config_regp)
{
    uint_t ps, n, shift, tte_idx;
    uint64_t vpn;               /* assumed temporary */
    uint8_t *tte_addr;

    n = tsb_config_regp->tsb_size;
    ps = tsb_config_regp->page_size;
    shift = SUN4V_PAGE_OFFSET(ps);
    vpn = va & SUN4V_VPN_MASK(ps);
    tte_idx = (vpn & SUN4V_TTE_IDX_MASK(n, ps)) >> shift;
    tte_addr = tsb_config_regp->tsb_base_sim + (tte_idx << 4);

    return tte_addr;    /* assumed return of the simulated host pointer */
}
/*
 * Converts an RA to a PA during HW tablewalk based on the configuration of
 * the MMU Real Range and MMU Physical Offset registers.
 * Returns true if a translation was found, false otherwise.
 */
bool_t
ss_hwtw_convert_ra_to_pa(tvaddr_t ra, tvaddr_t *pa, ss_strand_t *nsp, uint_t ps)
{
    tvaddr_t tsb_lb_addr, tsb_ub_addr;
    uint_t i;                   /* assumed loop variable */
    uint64_t page_size = 1ULL << SUN4V_PAGE_SIZE_SHIFT(ps);

    /*
     * sanity check on bits 55:40 of the RA (must be zero)
     */
    if ((ra >> (SUN4V_PN_UBIT + 1)) != 0)
        return false;

    /*
     * check if ra lies in one of four ranges specified by the range registers
     * (the four-iteration loop is assumed from the comment above)
     */
    for (i = 0; i < 4; i++) {
        /* check the enable bit of the real range register */
        if (!(nsp->real_range_reg[i] >> 63)) continue;
        tsb_lb_addr = (nsp->real_range_reg[i] & MASK64(26, 0)) << SUN4V_PN_LBIT;
        tsb_ub_addr = ((nsp->real_range_reg[i] & MASK64(53, 27)) >> 27) << SUN4V_PN_LBIT;
        if ((ra < tsb_lb_addr) || ((ra + page_size) > tsb_ub_addr)) continue;
        *pa = ra + (nsp->phy_off_reg[i] & SUN4V_PN_MASK);
        return true;
    }

    return false;
}
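/*
 * Example of the range check above (illustrative register values): a
 * real_range_reg of (1ULL << 63) | (0x10ULL << 27) | 0x2 is enabled and
 * bounds the range to [0x2 << SUN4V_PN_LBIT, 0x10 << SUN4V_PN_LBIT); any
 * RA whose whole page fits inside those bounds is translated by simply
 * adding the matching phy_off_reg entry (masked to the PN field).
 */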
/*
 * This function is almost identical to Niagara's ss_tlb_insert() except that it
 * - works for the sun4v TTE format (the only format supported by N2).
 * - supports a lock-free TLB update (as opposed to the 'locked down' feature in N1)
 * - autodemaps pages of the same size or larger (N1 demaps pages of same, larger or smaller size)
 * - returns various TTE bits, packed in 'flags', and pa_offset to the TLB miss handler
 */

/* This is the encoding used for parity generation */
static uint8_t niagara2_size_encoding[8] = { 0, 1, 0, 3, 0, 7 };
ss_trap_type_t
ss_tlb_insert(simcpu_t *sp, ss_mmu_t *mmup, ss_tlb_t *tlbp,
    uint_t partid, bool_t is_real, uint64_t data, uint_t *flags, uint64_t *pa_offset)
{
    tlb_entry_t *tep;           /* assumed locals for the statements below */
    uint64_t tag, xor;
    uint_t i, size, shift;
    uint16_t tag_context;
    matchcontext_t match_context;
    bool_t need_flush = false;

    data &= NIAGARA2_DATA_IN_MASK;

    /*
     * figure out the useful info about the page to insert
     */
    size = SUN4V_TTED_PS(data);
    shift = SUN4V_PAGE_SIZE_SHIFT(size);
    if (shift == 0)             /* assumed guard for invalid page-size encodings */
        return N2_trap_unsupported_page_size;

    /* parity generation - the head of this statement is elided in this extract */
        ~((1ULL << SUN4V_TTED_V_BIT) | MASK64(3, 0))) |
        niagara2_size_encoding[size]) << NIAGARA2_DATA_ACCESS_PAR_BIT;
    /*
     * This is VERY important:
     * The tag access register need NOT contain a correctly aligned tag entry
     * for the given page size. So it is REALLY IMPORTANT when forming the TLB
     * entry tag field that we correctly mask off the lower bits corresponding to
     * the selected page size. This is especially important because we use this
     * value to compute a va-pa offset.
     * Note: we do a similar mask operation later when using the PA to compute the
     * offset value we create.
     */
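    /*
     * Concrete illustration (hypothetical values): with a 4MB page the
     * shift is 22, so a tag access register holding 0x1234_5678_9000 must
     * be masked with MASK64(63, 22) to give the 4MB-aligned tag
     * 0x1234_5640_0000 before the va-pa offset is computed; using the
     * unmasked value would skew pa_offset by the low-order page bits.
     */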
    tag = mmup->tag_access_reg & MASK64(63, shift);
    tag_context = is_real ?     /* assumed head of the ternary */
        NIAGARA2_REAL_CONTEXT : mmup->tag_access_reg & MASK64(12, 0);
    match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context;

    RW_wrlock(&tlbp->rwlock);
    /*
     * Do autodemap: demap the old TLB entry whose page size is of
     * the same or larger than the new entry.
     */
    tep = &(tlbp->tlb_entryp[0]);
    for (i = tlbp->nentries; i > 0; i--, tep++) {
        xor = tep->tag_pfn ^ tag;
        if (tep->match_shift >= shift && (xor >> tep->match_shift) == 0 &&
            tep->match_context == match_context && tep->partid == partid) {
            DBGMMU( lprintf(sp->gid, "ss_tlb_insert: autodemap %c-TLB: old sz=0x%x new sz=0x%x\n",
                mmup->is_immu ? 'I' : 'D', SUN4V_TTED_PS(tep->data), size); );
            need_flush = true;  /* assumed: demap forces a translation-cache flush */
            /*
             * matching entry - put back on the free list
             */
            ss_tlb_unhash(tlbp, tep);
            ss_free_tlb_entry(tlbp, tep);
            tlb_entry_error_match(sp, mmup, tep);
        }
    }
    /*
     * replace an entry chosen randomly or in a Round Robin fashion
     */
    tep = tlbp->freep;          /* assumed: try the free list first */
    if (tep == (tlb_entry_t *)0) {
#if SS_TLB_REPLACE_RANDOM /* { */
        i = random() % tlbp->nentries;
        tep = &(tlbp->tlb_entryp[i]);
#elif SS_TLB_REPLACE_RROBIN /* } { */
        i = tlbp->last_replaced + 1;
        if (i >= tlbp->nentries) i = 0; /* wrap */
        tep = &(tlbp->tlb_entryp[i]);
#else /* } { */
#error Need to define TLB replacement alg
#endif /* } */
        /* put back on the free list */
        ss_tlb_unhash(tlbp, tep);
        ss_free_tlb_entry(tlbp, tep);
    }

    /*
     * free entry must be invalid !
     */
    ASSERT(!SUN4V_TTED_V(tep->data));
    tlbp->freep = tep->nextp;
    tep->match_context = match_context;
    tep->match_shift = shift;
    tep->tag_context = tag_context;

    /*
     * Note: variable size mask again based on page size
     */
    tep->pa_offset = (data & MASK64(39, shift)) - tag;
    /* niagara2 doesn't have read and exec bits - every page gets both */
    tep->flags = SS_TLB_FLAG_EXEC | SS_TLB_FLAG_READ;
    if (SUN4V_TTED_W(data)) tep->flags |= SS_TLB_FLAG_WRITE;
    if (SUN4V_TTED_P(data)) tep->flags |= SS_TLB_FLAG_PRIV;
    if (SUN4V_TTED_CP(data)) tep->flags |= SS_TLB_FLAG_CP;
    if (SUN4V_TTED_E(data)) tep->flags |= SS_TLB_FLAG_E;
    if (SUN4V_TTED_NFO(data)) tep->flags |= SS_TLB_FLAG_NFO;
    if (SUN4V_TTED_IE(data)) tep->flags |= SS_TLB_FLAG_IE;
    /* Finally insert the new entry into the hash table for the TLB */

    /* Hash uses match_context so it skews real->phys entries away from context 0 */
    i = tag >> SS_MAX_PAGE_SIZE_BITS;
    i += match_context + partid;
    /* PRM notes that inserting with V = 0 operates like V = 1 */
    if (SUN4V_TTED_V(data)) {
        tep->hashidx = i;       /* to help with unhooking later */
        tep->nextp = tlbp->hash[i].ptr;
        tlbp->hash[i].ptr = tep;    /* assumed, mirroring ss_tlb_insert_idx() below */
    } else {
        tep->nextp = tlbp->freep;
        tlbp->freep = tep;          /* assumed, mirroring ss_tlb_insert_idx() below */
    }
    DBGMMU( lprintf(sp->gid, "ss_tlb_insert: %c-TLB: sun4v tte=%llx [nfo=%d e=%d cp=%d p=%d ep=%d w=%d sz=0x%x]\n",
        mmup->is_immu ? 'I' : 'D', data,
        SUN4V_TTED_NFO(data), SUN4V_TTED_E(data), SUN4V_TTED_CP(data), SUN4V_TTED_P(data),
        SUN4V_TTED_EP(data), SUN4V_TTED_W(data), SUN4V_TTED_PS(data));
    lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx shift=%d flags=0x%x\n",
        partid, tag, tag_context, match_context, tep->pa_offset, tep->match_shift, tep->flags); );

    /*
     * return tep->flags and tep->pa_offset
     */
    *flags = tep->flags;        /* assumed from the comment above */
    *pa_offset = tep->pa_offset;

    RW_unlock(&tlbp->rwlock);

    if (need_flush) {           /* assumed guard - need_flush is otherwise unused */
        sp->xicache_trans_flush_pending = true;
        sp->xdcache_trans_flush_pending = true;
        ss_tlb_flush_shares(sp, tlbp, mmup->is_immu);
    }

    return SS_trap_NONE;        /* assumed: callers compare against SS_trap_NONE */
}
ss_trap_type_t
ss_tlb_insert_idx(simcpu_t *sp, ss_mmu_t *mmup,
    ss_tlb_t *tlbp, uint_t partid, bool_t is_real, uint64_t data, uint_t idx1)
{
    tlb_entry_t *tep;           /* assumed locals for the statements below */
    uint64_t tag;
    uint_t size, shift, idx;
    uint16_t tag_context;
    matchcontext_t match_context;
    bool_t need_flush = false;

    /*
     * mask out bits which are ignored in Niagara2 HW.
     * Since this is a direct store to the TLB, the mask includes
     * the parity bit as well (assumed completion of this comment).
     * TODO: confirm that parity is not generated for data access.
     */
    uint64_t tte_data = data & NIAGARA2_DATA_ACCESS_MASK;

    /*
     * figure out the useful info about the page to insert
     */
    size = SUN4V_TTED_PS(tte_data);
    shift = SUN4V_PAGE_SIZE_SHIFT(size);
    if (shift == 0)             /* assumed guard, as in ss_tlb_insert() above */
        return N2_trap_unsupported_page_size;

    tag = mmup->tag_access_reg & MASK64(63, shift);
    tag_context = is_real ?     /* assumed head of the ternary */
        NIAGARA2_REAL_CONTEXT : mmup->tag_access_reg & MASK64(12, 0);
    match_context = is_real ? SS_TLB_REAL_CONTEXT : tag_context;

    /*
     * Hash uses match_context so it skews real->phys entries away
     * from context 0.
     */
    idx = tag >> SS_MAX_PAGE_SIZE_BITS;
    idx += match_context + partid;

    RW_wrlock(&tlbp->rwlock);

    tep = &(tlbp->tlb_entryp[idx1]);
    if (tep->hashidx != -1) {
        ss_tlb_unhash(tlbp, tep);
    } else {
        ss_tlb_unfree(tlbp, tep);   /* assumed: the entry was on the free list instead */
    }

    /* overwrite the existing entry */
    tep->match_context = match_context;
    tep->match_shift = shift;
    tep->tag_context = tag_context;
    tep->pa_offset = (tte_data & MASK64(39, shift)) - tag;
    DBGMMU( lprintf(sp->gid, "ss_tlb_insert_idx: %c-TLB[0x%x]: sun4v tte=%llx "
        "[nfo=%d e=%d cp=%d p=%d ep=%d w=%d sz=0x%x]\n",
        mmup->is_immu ? 'I' : 'D', idx1, data,
        SUN4V_TTED_NFO(tte_data), SUN4V_TTED_E(tte_data),
        SUN4V_TTED_CP(tte_data), SUN4V_TTED_P(tte_data),
        SUN4V_TTED_EP(tte_data), SUN4V_TTED_W(tte_data),
        SUN4V_TTED_PS(tte_data));
    lprintf(sp->gid, "\tpart=0x%x tag=%p ctx=%x/%x offset=%llx "
        "shift=%d flags=0x%x\n",                    /* assumed tail of the format string */
        partid, tag, tag_context, match_context, tep->pa_offset,
        tep->match_shift, tep->flags); );
    /* niagara2 doesn't have read and exec bits - every page gets both */
    tep->flags = SS_TLB_FLAG_EXEC | SS_TLB_FLAG_READ;
    if (SUN4V_TTED_W(tte_data)) tep->flags |= SS_TLB_FLAG_WRITE;
    if (SUN4V_TTED_P(tte_data)) tep->flags |= SS_TLB_FLAG_PRIV;
    if (SUN4V_TTED_CP(tte_data)) tep->flags |= SS_TLB_FLAG_CP;
    if (SUN4V_TTED_E(tte_data)) tep->flags |= SS_TLB_FLAG_E;
    if (SUN4V_TTED_NFO(tte_data)) tep->flags |= SS_TLB_FLAG_NFO;
    if (SUN4V_TTED_IE(tte_data)) tep->flags |= SS_TLB_FLAG_IE;
    /*
     * Finally re-insert the entry into the hash table for the TLB,
     * if valid. Direct writes may write any bit pattern.
     */
    if (SUN4V_TTED_V(tte_data)) {
        tep->hashidx = idx;     /* to help with unhooking later */
        tep->nextp = tlbp->hash[idx].ptr;
        tlbp->hash[idx].ptr = tep;
    } else {
        tep->nextp = tlbp->freep;
        tlbp->freep = tep;      /* assumed, mirroring the hash insert above */
    }

    RW_unlock(&tlbp->rwlock);
    sp->xicache_trans_flush_pending = true;
    sp->xdcache_trans_flush_pending = true;
    ss_tlb_flush_shares(sp, tlbp, mmup->is_immu);

    return SS_trap_NONE;        /* assumed: callers compare against SS_trap_NONE */
}
/*
 * Fill in the TSB config register with the given data.
 */
void niagara2_write_tsb_config(simcpu_t *sp, ss_tsb_info_t *tsb_config_reg, uint64_t data)
{
    uint_t shift;               /* assumed local */

    tsb_config_reg->data = data;
    tsb_config_reg->enable = ((data >> 63) & 1) ? true : false;
    tsb_config_reg->use_context_0 = ((data >> 62) & 1) ? true : false;
    tsb_config_reg->use_context_1 = ((data >> 61) & 1) ? true : false;
    tsb_config_reg->ra_not_pa = ((data >> 8) & 1) ? true : false;
    /* MASK should be (7, 4) and check for unsupported page size */
    tsb_config_reg->page_size = (data & MASK64(6, 4)) >> 4;
    shift = SUN4V_PAGE_SIZE_SHIFT(tsb_config_reg->page_size);
    if (shift > 22)             /* assumed split between the two assignments below */
        tsb_config_reg->tag_match_shift = shift - 22;
    else
        tsb_config_reg->tag_match_shift = 0;
    tsb_config_reg->tsb_size = data & MASK64(3, 0);
    tsb_config_reg->tsb_base = data &
        SUN4V_TSB_BASE_MASK(tsb_config_reg->tsb_size);
    if (tsb_config_reg->enable)
        tsb_config_reg->tsb_base_sim = ss_hwtw_find_base(sp, tsb_config_reg);
    else
        tsb_config_reg->tsb_base_sim = NULL;
}
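/*
 * Illustration (hypothetical write): data = (1ULL << 63) | (1ULL << 8) |
 * (1 << 4) | 3 enables the TSB, marks it RA-based, selects page size
 * encoding 1 (64KB), and tsb_size 3; under the usual sun4v convention
 * (assumed here) a size field of n means 512 << n TTEs, i.e. 4096 entries.
 */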
/*
 * We arrive here because:
 *	1) a malformed (unaligned PC)
 */
void ss_xic_miss(simcpu_t *sp, xicache_line_t *xc_linep, tvaddr_t pc)
{
    sparcv9_cpu_t *v9p;         /* assumed locals for the code below */
    ss_strand_t *nsp;
    ss_proc_t *npp;
    tvaddr_t va, tag;
    tpaddr_t pa, pa_tag;
    ss_tlb_t *tlbp;
    config_addr_t *cap;
    tpaddr_t extent;
    uint8_t *bufp;
    uint_t flags, idx, partid;
    uint64_t pa_offset;
    ss_trap_type_t tt;
    uint_t context, context_type, miss_context;
    bool_t search_tlb_again = false;

    v9p = (sparcv9_cpu_t *)(sp->specificp);
    nsp = v9p->impl_specificp;
    npp = sp->config_procp->procp;
    /*
     * determine context in terms of TL
     */
    if (v9p->tl > 0) {          /* assumed: nucleus context is used at TL > 0 */
        miss_context = context = SS_NUCLEUS_CONTEXT;
        context_type = ss_ctx_nucleus;
    } else {
        miss_context = context = nsp->pri_context;
        context_type = ss_ctx_primary;
        if (nsp->pri_context != nsp->pri_context1)
            search_tlb_again = true;
    }

    /* The PC always has bits 0 & 1 zero */
    va = pc;                    /* assumed binding used by the tag computation */
    /* align the pc to the start of the XC line */
    tag = va & XICACHE_TAG_PURE_MASK;
    pa = va;                    /* assumed defaults - adjusted on a TLB hit below */
    pa_tag = tag;
    /*
     * Perform a virtual to physical translation
     * so we can determine if we are dealing with
     * a TLB miss or simply an x-cache miss.
     */

    /* Find the pa corresponding to the line we need */
    /* We assume that for SunSPARC, the TLB is off in Hyper priv mode */
    /* FIXME: we should probably do this by swizzling a function pointer */
    /* for this when we change mode, rather that having an if here ... fix later */
    if (v9p->pstate.addr_mask) {
        /* NOTE: we dont mask tag ... we allow that to match the 64bit address */
    }

    {
        tlb_entry_t *tep, *tmp_tep;
        ss_trap_type_t miss_trap_type;

        /* If MMU disabled, but we're in priv/user mode use real addresses */
        if (!nsp->immu.enabled) {
            context = SS_TLB_REAL_CONTEXT;
            search_tlb_again = false;
        }

        /*
         * check out of range address (if it lies within the "VA hole")
         */
        if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) {
            /*
             * setup the right trap type
             */
            if (context == SS_TLB_REAL_CONTEXT)
                tt = N2_trap_instruction_real_range;
            else
                tt = N2_trap_instruction_address_range;

            SET_ITLB_FAULT(nsp, VA48(va));
            nsp->immu.tag_access_reg = (VA48(va) & ~MASK64(12, 0)) |
                miss_context;   /* assumed tail, as in the miss path below */
            DBGMMU( lprintf(sp->gid, "IMMU tag access = 0x%llx\n", nsp->immu.tag_access_reg); );
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)tt);
            return;
        }
        tlbp = nsp->itlbp;      /* assumed: the instruction TLB for an I-fetch miss */
        partid = nsp->partid;   /* assumed binding used by the match below */

        RW_rdlock(&tlbp->rwlock);

        /* FIXME: Need a better hash than this ! */
        idx = va >> SS_MAX_PAGE_SIZE_BITS;
        idx += context + nsp->partid;

        /*
         * So we search for a matching page using the info we have in the
         * hash - while another thread might possibly be removing or
         * inserting an entry into the same table.
         */
        for (tep = tlbp->hash[idx].ptr; tep != (tlb_entry_t *)0; tep = tep->nextp) {
            /* try and match the entry as appropriate */
            if (((tep->tag_pfn ^ va) >> tep->match_shift) == 0 &&
                tep->match_context == context && tep->partid == partid)
                goto itlb_match;
        }

        /*
         * Might need to search the TLB one more time based
         * on the shared context value.
         */
        if (search_tlb_again) {     /* assumed retry guard */
            search_tlb_again = false;
            context = nsp->pri_context1;
        }

        RW_unlock(&tlbp->rwlock);

        DBGMISS( lprintf(sp->gid, "itlb miss: pc=%lx va=%lx ctx=%x\n", pc, va, miss_context); );
        /*
         * If the MMU is "disabled" in privileged mode ... this is a real miss, not a
         * virtual translation miss, so the fault context and trap type is different
         */
        if (nsp->immu.enabled) {    /* assumed split implied by the comment above */
            miss_trap_type = ss_hardware_tablewalk(sp, &(nsp->immu), tlbp, va,
                context_type, &flags, &pa_offset);
            if (miss_trap_type == SS_trap_NONE)
                goto itlb_priv_test;    /* assumed: use the flags/pa_offset the tablewalk returned */
        } else {
            miss_context = 0;   /* null for ra->pa miss undefined ? */
            miss_trap_type = SS_trap_instruction_real_translation_miss;
        }
        SET_ITLB_FAULT(nsp, va);
        nsp->immu.tag_access_reg = (va & ~MASK64(12, 0)) | miss_context;    /* FIXME: - do properly later */
        DBGMMU( lprintf(sp->gid, "miss_trap_type=0x%x "
            "IMMU tag access = 0x%llx\n",
            miss_trap_type, nsp->immu.tag_access_reg); );
        v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
        return;
itlb_match:;
        /*
         * try and match the entry again for multi-hit
         */
        for (tmp_tep = tep->nextp; tmp_tep != (tlb_entry_t *)0; tmp_tep = tmp_tep->nextp) {
            if (((tmp_tep->tag_pfn ^ va) >> tmp_tep->match_shift) == 0
                && tmp_tep->match_context == context && tmp_tep->partid == partid) {
                RW_unlock(&tlbp->rwlock);
                DBGMMU( lprintf(sp->gid, "itlb miss multi-hit: pc=%lx va=%lx ctx=%x\n",
                    pc, va, context); );    /* assumed argument list */
                DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n",
                    tep->tag_context, tep->match_shift, tep->tag_pfn,
                    tep->tag_pfn + tep->pa_offset); );
                DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n",
                    tmp_tep->tag_context, tmp_tep->match_shift,
                    tmp_tep->tag_pfn, tmp_tep->tag_pfn + tmp_tep->pa_offset); );
                v9p->post_precise_trap(sp,
                    (sparcv9_trap_type_t)SS_trap_instruction_access_MMU_error);
                return;
            }
        }

        flags = tep->flags;         /* assumed: mirror what the tablewalk returns */
        pa_offset = tep->pa_offset; /* assumed */
        pa += tep->pa_offset;       /* assumed companion of the line below */
        pa_tag += tep->pa_offset;
        RW_unlock(&tlbp->rwlock);
itlb_priv_test:;                    /* assumed label - see the HW tablewalk above */
        /*
         * Errors on itlb hit: stash table_entry pointer and if
         * subsequent itlb hit on same entry post error again.
         */
        if (itlb_hit_error_match(sp, tep))
            return;                 /* assumed consequence */

        if ((flags & SS_TLB_FLAG_PRIV) && v9p->state == V9_User) {
            miss_trap_type = SS_trap_IAE_privilege_violation;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);    /* assumed delivery */
            return;
        }
        if (flags & SS_TLB_FLAG_NFO) {
            miss_trap_type = SS_trap_IAE_NFO_page;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);    /* assumed delivery */
            return;
        }
    }
    /* Niagara 2 only implements 40 bits of PA, the tlb code
       masks PA so here we need to mask bypass PAs */
    pa &= MASK64(39, 0);            /* assumed mask implied by the comment */

    /*
     * OK - now go get the instructions to fill in the xc-line
     * ... start by finding the device that has the
     * memory we need.
     * optimise: by guessing at the last device found.
     */

    /* now find the device - looking in the cache first */
    cap = sp->xic_miss_addrp;
    if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) {
        domain_t *domainp;          /* assumed inner declarations */
        config_proc_t *config_procp;

        config_procp = sp->config_procp;
        domainp = config_procp->domainp;

        cap = find_domain_address(domainp, pa);
        if (cap == NULL) {          /* assumed no-match guard */
            /* OK it's a bus error there was no backing store */
            fatal("bus error - instruction fetch from pc=0x%llx "
                "(cacheline va=0x%llx -> physical 0x%llx)", pc, va, pa);    /* FIXME */
        }
    }
    sp->xic_miss_addrp = cap;       /* cache for next time */
    /* try and get the buffer pointer */
    extent = cap->config_devp->dev_typep->dev_cacheable(cap, DA_Instn,
        pa_tag - cap->baseaddr, &bufp);
    if (extent < XICACHE_LINE_SIZE) {
        /* bus error again ? or fill from multiple devices ? */
        fatal("fix bus error 2");
    }

    /*
     * Errors on ifetch to icache or L2 cache
     * Make sure the L2 cache is enabled
     */
    xicache_error_match(sp, pa);

    xc_linep->tag = tag | sp->tagstate;
    xc_linep->memoryoffset = ((uint64_t)bufp) - tag;

    /*
     * FIXME: If breakpoints are in use make sure we really clear the decoded line
     * to ensure that we dont get instruction aliasing. XI-cache prob. needs a re-design
     * from this standpoint - but this will wait until we complete the JIT version.
     * Until then this is a reminder and a place holder.
     */
    if (sp->bp_infop) xicache_clobber_line_decodes(sp, tag);

    xicache_line_fill_risc4(sp, xc_linep, tag, bufp);
}
static char valid_asi_map[256] = {
    /* 0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
       0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, /* 0? */
       1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, /* 1? */
       1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, /* 2? */
       1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, /* 3? */
       1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, /* 4? */
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 5? */
       0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* 6? */
       0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7? */
       1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /* 8? */
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9? */
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a? */
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b? */
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, /* c? */
       1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /* d? */
       1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, /* e? */
       1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, /* f? */
};
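/*
 * Reading the map (example): ASI 0x25 (SS_ASI_QUEUE) is row "2?",
 * column 5, which holds 1, so it passes the validity check below;
 * ASI 0x2d is row "2?", column d, which holds 0, so any access raises
 * DAE_invalid_ASI regardless of privilege.
 */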
/*
 * This is not the worlds most efficient routine, but then we assume that ASI's are
 * not frequently occurring memory access types - we may have to fast path the
 * ASI_AS_IF_USER_PRIMARY etc. some how if used frequently by kernel b-copy.
 */
void ss_asi_access(simcpu_t *sp, maccess_t op, uint_t regnum, uint_t asi,
    uint64_t reg1, uint64_t reg2, asi_flag_t asi_flag)
{
    ss_tsb_info_t *tsbinfop, *tsbinfop1;
    uint_t context_type, idx;
    sparcv9_cpu_t *v9p;         /* assumed locals used throughout the body */
    ss_strand_t *nsp;
    ss_proc_t *npp;
    ss_mmu_t *mmup;
    ss_tlb_t *tlbp;
    tlb_entry_t *tep;
    uint64_t val, addr;
    uint64_t *valp;
    uint_t mflags, size, mask;
    bool_t is_load, is_real;
    uint_t core_num;

    v9p = (sparcv9_cpu_t *)(sp->specificp);
    nsp = v9p->impl_specificp;
    npp = (ss_proc_t *)(sp->config_procp->procp);
    core_num = nsp->core;       /* assumed binding - tnsp->core is used below */

    ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);

    if (asi == V9_ASI_IMPLICIT)
        goto no_asi_valid_checks;
    /*
     * First check if this is a legitimate ASI based
     * on current privilege level.
     * Niagara 2 prioritizes invalid ASI over privileged action.
     */
    if (!valid_asi_map[asi]) {
        v9p->post_precise_trap(sp,
            (sparcv9_trap_type_t)SS_trap_DAE_invalid_ASI);
        return;
    }

    switch (v9p->state) {       /* assumed dispatch implied by the ASSERTs below */
    case V9_User:
        ASSERT(!v9p->pstate.priv && !v9p->hpstate.hpriv);
        if (asi < 0x80) {       /* assumed: ASIs 0x00-0x7f are restricted */
            v9p->post_precise_trap(sp, Sparcv9_trap_privileged_action);
            return;
        }
        break;
    case V9_Priv:
        ASSERT(v9p->pstate.priv && !v9p->hpstate.hpriv);
        if (asi >= 0x30 && asi < 0x80) {
            /* ASIs reserved for hpriv mode appear to priv mode as data access exceptions */
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)SS_trap_privileged_action);
            return;
        }
        break;
    case V9_HyperPriv:
        ASSERT(v9p->hpstate.hpriv);
        break;
    case V9_RED:
        ASSERT(v9p->hpstate.red);
        break;
    }

no_asi_valid_checks:;
    /*
     * Next pull out all the memory access ASIs ...
     */
    mflags = (V9_User != v9p->state) ? MF_Has_Priv : 0;
    context_type = ss_ctx_reserved;
    mask = (1 << (op & MA_Size_Mask)) - 1;

    switch (asi) {              /* assumed dispatch over the memory ASIs */
    case V9_ASI_IMPLICIT:
        if (v9p->tl > 0) {      /* assumed nucleus/primary split for the implicit ASI */
            asi = (v9p->pstate.cle) ? SS_ASI_NUCLEUS_LITTLE : SS_ASI_NUCLEUS;
            goto ss_asi_nucleus;
        }
        asi = (v9p->pstate.cle) ? SS_ASI_PRIMARY_LITTLE : SS_ASI_PRIMARY;
        goto ss_asi_primary;
ss_asi_nucleus:;
    case SS_ASI_NUCLEUS_LITTLE:
    case SS_ASI_NUCLEUS:        /* assumed companion label */
        context_type = ss_ctx_nucleus;
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
        break;

    case SS_ASI_PRIMARY_NO_FAULT_LITTLE:
    case SS_ASI_PRIMARY_NO_FAULT:
        if (IS_V9_MA_STORE(op & MA_Op_Mask))
            goto data_access_exception;
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /*FALLTHROUGH*/
    case SS_ASI_AS_IF_USER_PRIMARY_LITTLE:
    case SS_ASI_AS_IF_USER_PRIMARY:
ss_asi_primary:;
    case SS_ASI_PRIMARY_LITTLE: /* (88) RW Implicit Primary Address space (LE) */
    case SS_ASI_PRIMARY:        /* (80) RW Implicit Primary Address space */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
        context_type = ss_ctx_primary;
        break;
    case SS_ASI_SECONDARY_NO_FAULT_LITTLE:
    case SS_ASI_SECONDARY_NO_FAULT:
        if (IS_V9_MA_STORE(op & MA_Op_Mask))
            goto data_access_exception;
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /*FALLTHROUGH*/
    case SS_ASI_AS_IF_USER_SECONDARY_LITTLE:
    case SS_ASI_AS_IF_USER_SECONDARY:
    case SS_ASI_SECONDARY_LITTLE:
    case SS_ASI_SECONDARY:      /* assumed companion label */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
        context_type = ss_ctx_secondary;
        break;
    case SS_ASI_REAL_IO_LITTLE: /* (1D) RW Same as ASI_PHYS_USE_EC_LITTLE for memory
                                   addresses. For IO addresses, physical address,
                                   non-cacheable, with side-effect (LE) */
    case SS_ASI_REAL_IO:        /* (15) RW Same as ASI_PHYS_USE_EC for memory addresses.
                                   For IO addresses, physical address, non-cacheable,
                                   with side-effect (assumed comment tail) */
        mflags |= MF_TLB_Real_Ctx;
        context_type = ss_ctx_nucleus;
        break;

    case SS_ASI_REAL_MEM_LITTLE: /* (1C) RW physical address, non-allocating in L1 cache */
    case SS_ASI_REAL_MEM:        /* (14) RW physical address, non-allocating in L1 cache */
        mflags |= MF_TLB_Real_Ctx;
        if (IS_V9_MA_ATOMIC(op & MA_Op_Mask)) mflags |= MF_Atomic_Access;
        context_type = ss_ctx_nucleus;
        break;
    case SS_ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE:    /* RW 64B block load/store, primary address space, user privilege (LE) */
    case SS_ASI_BLOCK_AS_IF_USER_PRIMARY:           /* RW 64B block load/store, primary address space, user privilege */
    case SS_ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE:  /* RW 64B block load/store, secondary address space, user privilege (LE) */
    case SS_ASI_BLOCK_AS_IF_USER_SECONDARY:         /* RW 64B block load/store, secondary address space, user privilege */
    case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P_LITTLE: /* Block initializing store/128b atomic LDDA, primary address, user priv (LE) */
    case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_P:  /* Block initializing store/128b atomic LDDA, primary address, user privilege */
    case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S_LITTLE: /* Block initializing store, secondary address, user privilege (LE) */
    case SS_ASI_AS_IF_USER_BLK_INIT_ST_QUAD_LDD_S:  /* Block initializing store/128b atomic LDDA, secondary address, user privilege */
    case SS_ASI_QUAD_LDD_LITTLE:                    /* 128b atomic LDDA (LE) */
    case SS_ASI_QUAD_LDD:                           /* 128b atomic LDDA */
        /* This ASI must be used with an LDDA instruction */
        if ((op & MA_Op_Mask) != MA_LdDouble) {     /* assumed guard */
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        /* Adjust size to 128bytes so alignment is correct */
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        context_type = ss_ctx_nucleus;
        break;
    case SS_ASI_QUAD_LDD_REAL_LITTLE:   /* 128b atomic LDDA, real address (LE) */
    case SS_ASI_QUAD_LDD_REAL:          /* 128b atomic LDDA, real address */
        /* This ASI must be used with an LDDA instruction */
        if ((op & MA_Op_Mask) != MA_LdDouble) {     /* assumed guard */
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        /* Adjust size to 128bytes so alignment is correct */
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        mflags |= MF_TLB_Real_Ctx;
        context_type = ss_ctx_nucleus;
        break;
    case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD_LITTLE: /* Block initializing store/128b atomic LDDA (LE) */
    case SS_ASI_NUCLEUS_BLK_INIT_ST_QUAD_LDD:       /* Block initializing store/128b atomic LDDA */
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
            addr = ((op & MA_Op_Mask) == MA_CAS) ?
                reg1 : (reg1 + reg2);   /* assumed completion of the ternary */
        } else {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;
    case SS_ASI_BLK_INIT_ST_QUAD_LDD_P_LITTLE:      /* Block initializing store/128b atomic LDDA, primary address (LE) */
    case SS_ASI_BLK_INIT_ST_QUAD_LDD_P:             /* Block initializing store/128b atomic LDDA, primary address */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
            addr = ((op & MA_Op_Mask) == MA_CAS) ?
                reg1 : (reg1 + reg2);   /* assumed completion of the ternary */
        } else {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;
    case SS_ASI_BLK_INIT_ST_QUAD_LDD_S_LITTLE:      /* Block initializing store/128b atomic LDDA, secondary address (LE) */
    case SS_ASI_BLK_INIT_ST_QUAD_LDD_S:             /* Block initializing store/128b atomic LDDA, secondary address */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        if (MA_St == (op & MA_Op_Mask) || MA_StDouble == (op & MA_Op_Mask)) {
            addr = ((op & MA_Op_Mask) == MA_CAS) ?
                reg1 : (reg1 + reg2);   /* assumed completion of the ternary */
        } else {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;
    case SS_ASI_BLK_PL:         /* 64B block load/store, primary address (LE) */
    case SS_ASI_BLK_COMMIT_P:   /* Same as SS_ASI_BLK_P on N2 (no commit) */
    case SS_ASI_BLK_P:          /* 64B block load/store, primary address */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with an LDDFA/STDFA instruction */
        if (!(MA_ldfp64 == op || MA_stfp64 == op) ||
            ((regnum & 0xf) != 0)) {    /* assumed: block FP ops need a 16-aligned register */
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) :
            (MA_Size512 | MA_StFloat);
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        context_type = ss_ctx_primary;  /* assumed */
        break;
    case SS_ASI_BLK_SL:         /* 64B block load/store, secondary address (LE) */
    case SS_ASI_BLK_COMMIT_S:   /* Same as SS_ASI_BLK_S on N2 (no commit) */
    case SS_ASI_BLK_S:          /* 64B block load/store, secondary address */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with an LDDFA/STDFA instruction */
        if (!(MA_ldfp64 == op || MA_stfp64 == op) ||
            ((regnum & 0xf) != 0)) {    /* assumed: block FP ops need a 16-aligned register */
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        op = (MA_ldfp64 == op) ? (MA_Size512 | MA_LdFloat) :
            (MA_Size512 | MA_StFloat);
        mask = (1 << (op & MA_Size_Mask)) - 1;
        mflags |= MF_Atomic_Access;
        context_type = ss_ctx_secondary;    /* assumed */
        break;
    /* The six partial-store FP ASIs follow; their case labels are elided in this extract. */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with STDFA instruction */
        if (!(MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with STDFA instruction */
        if (!(MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with STDFA instruction */
        if (!(MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with STDFA instruction */
        if (!(MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with STDFA instruction */
        if (!(MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with STDFA instruction */
        if (!(MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        break;      /* assumed */
    /* The 8-bit short-FP ASIs follow; their case labels are elided in this extract. */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with an LDDFA/STDFA instruction */
        if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        op = (MA_ldfp64 == op) ? MA_ldfp8 : MA_stfp8;
        mask = (1 << (op & MA_Size_Mask)) - 1;
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with an LDDFA/STDFA instruction */
        if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        op = (MA_ldfp64 == op) ? MA_ldfp8 : MA_stfp8;
        mask = (1 << (op & MA_Size_Mask)) - 1;
        break;      /* assumed */
    /* The 16-bit short-FP ASIs follow; their case labels are elided in this extract. */
        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with an LDDFA/STDFA instruction */
        if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        op = (MA_ldfp64 == op) ? MA_ldfp16 : MA_stfp16;
        mask = (1 << (op & MA_Size_Mask)) - 1;
        break;      /* assumed */

        if (nsp->mmu_bypass) mflags |= MF_MMU_Bypass;
        /* This ASI must be used with an LDDFA/STDFA instruction */
        if (!(MA_ldfp64 == op || MA_stfp64 == op)) {
            v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
            return;
        }
        op = (MA_ldfp64 == op) ? MA_ldfp16 : MA_stfp16;
        mask = (1 << (op & MA_Size_Mask)) - 1;
        break;      /* assumed */

    default:
        goto not_a_memory_asi;  /* assumed exit to the state-register handling below */
    }
    if ((MA_LdFloat == (op & MA_Op_Mask)) || (MA_StFloat == (op & MA_Op_Mask))) {
        ss_memory_asi_access(sp, op, (uint64_t *)&(sp->fpreg.s32[regnum]),
            mflags, asi, context_type, mask, reg1, reg2);
    } else {
        ss_memory_asi_access(sp, op, &(sp->intreg[regnum]),
            mflags, asi, context_type, mask, reg1, reg2);
        ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);
    }
    return;     /* assumed: memory ASIs complete here */
not_a_memory_asi:;              /* assumed label - see the memory switch above */
    /* OK, derive access address etc. */
    size = op & MA_Size_Mask;
    op &= MA_Op_Mask;           /* assumed: later compares use the bare op kind */
    is_load = IS_V9_MA_LOAD(op);
    /* No MA_CAS case required for cpu state registers. */
    addr = reg1 + reg2;         /* assumed address derivation */

    /*
     * Finally all the cpu state registers ...
     * Currently only 64bit accesses supported ..
     * need to ascertain exactly what niagara does here ! FIXME
     * FIXME: Of course all the alt address space accesses are different here !
     */
    if (size != MA_Size64 || (addr & 0x7) != 0 || IS_V9_MA_ATOMIC(op))
        goto data_access_exception;

    ASSERT(MA_LdSigned != op);  /* not signed for any stxas or for ldxas */
IMPL_WARNING(("Unimplemented niagara2 asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc)); \
if (is_load) { val = 0; goto load_complete; }\
#if ERROR_TRAP_GEN /* { */
#else /* } ERROR_TRAP_GEN { */
#define TODO(s) fatal("Unimplemented niagara2 asi %s (0x%x) accessed with address 0x%llx @ pc=%lx\n", #s, asi, addr, sp->pc)
#endif /* } ERROR_TRAP_GEN { */
    /* If we're storing fetch the value to stuff */
    if (!is_load) {             /* assumed wrapper */
        if (MA_St == op) {
            val = sp->intreg[regnum];
        } else {    /* MA_StFloat */
            val = (size == MA_Size32) ?     /* assumed size split */
                sp->fpreg.s32[regnum] : sp->fpreg.s64[regnum >> 1];
        }
    }
    /*
     *      Hex     Access  VA      Repli-  DESCRIPTION
     *                              cated
     */

    switch (asi) {              /* assumed dispatch over the state-register ASIs */

        /* MANDATORY SPARC V9 ASIs */

        /* All in the memory section above */

        /* SunSPARC EXTENDED (non-V9) ASIs */

    case SS_ASI_SCRATCHPAD:     /* assumed label */
        /*
         * 0x20 RW 0-18  Y  Scratchpad Registers
         * 0x20 -  20-28 N  any type of access causes data_access_exception
         * 0x20 RW 30-38 Y  Scratchpad Registers
         */
        if (INVALID_SCRATCHPAD(addr)) {
            goto data_access_exception;
        }
        valp = &(nsp->strand_reg[SSR_ScratchPad0 + (addr >> 3)]);
        if (is_load) {          /* assumed load path */
            val = *valp;
            goto load_complete;
        }
        DBGSCRATCH( if (*valp != val)
            lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: "
            "0x%llx -> 0x%llx pc=0x%llx\n",
            asi, addr, *valp, val, sp->pc); );
        *valp = val;            /* assumed store */
        break;
    case SS_ASI_MMU_CONTEXTID:  /* assumed label */
        /*
         * 0x21 RW 8   Y  I/DMMU Primary Context Register
         * 0x21 RW 10  Y  DMMU Secondary Context Register
         * 0x21 RW 120 Y  I/DMMU Synchronous Fault Pointer
         * 0x21 RW 108 Y  I/DMMU Primary Context Register 1
         * 0x21 RW 110 Y  DMMU Secondary Context Register 1
         */
        if (is_load) {
            switch (addr) {     /* assumed decode */
            case 0x8:
                val = (uint64_t)(nsp->pri_context);
                goto load_complete;
            case 0x10:
                val = (uint64_t)(nsp->sec_context);
                goto load_complete;
            case 0x108:
                val = (uint64_t)(nsp->pri_context1);
                goto load_complete;
            case 0x110:
                val = (uint64_t)(nsp->sec_context1);
                goto load_complete;
            default:
                goto data_access_exception;
            }
        }

        /*
         * Since we're changing a context register we should
         * flush the xi and xd trans caches. However, this only matters
         * for the primary context - iff we are in priv mode with
         * TL=0. For all other cases (TL>0) or hpriv=1, either the
         * MMU is not in use, or we're executing the nucleus context so
         * we can rely on a done/retry instn / mode change to do the flush for us
         * when we change mode later.
         */
        DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            asi, addr, val, sp->pc); );
        switch (addr) {         /* assumed decode; register pairing below is assumed */
        case 0x8:
            if (nsp->pri_context != val ||
                nsp->pri_context1 != val) {
                sp->xicache_trans_flush_pending = true;
                sp->xdcache_trans_flush_pending = true;
            }
            nsp->pri_context = val & MASK64(12, 0);
            /* update the corresponding second context register for a write to the first */
            nsp->pri_context1 = val & MASK64(12, 0);
            break;
        case 0x10:
            nsp->sec_context = val & MASK64(12, 0);
            /* update the corresponding second context register for a write to the first */
            nsp->sec_context1 = val & MASK64(12, 0);
            break;
        case 0x108:
            if (nsp->pri_context1 != val) {
                sp->xicache_trans_flush_pending = true;
                sp->xdcache_trans_flush_pending = true;
            }
            nsp->pri_context1 = val & MASK64(12, 0);
            break;
        case 0x110:
            nsp->sec_context1 = val & MASK64(12, 0);
            break;
        default:
            goto data_access_exception;
        }
        break;
    case SS_ASI_QUEUE:          /* 0x25 RW 3C0 Y CPU Mondo Queue Head Pointer */
        /*
         * 0x25 RW 3C8 Y CPU Mondo Queue Tail Pointer
         * 0x25 RW 3D0 Y Device Mondo Queue Head Pointer
         * 0x25 RW 3D8 Y Device Mondo Queue Tail Pointer
         * 0x25 RW 3E0 Y Resumable Error Queue Head Pointer
         * 0x25 RW 3E8 Y Resumable Error Queue Tail Pointer
         * 0x25 RW 3F0 Y Nonresumable Error Queue Head Pointer
         * 0x25 RW 3F8 Y Nonresumable Error Queue Tail Pointer
         */
        if (is_load) {
            switch (addr) {     /* assumed head/tail decode */
            case 0x3c0: case 0x3d0: case 0x3e0: case 0x3f0:
                val = (uint16_t)(nsp->nqueue[(addr >> 4) - 0x3c].head);
                goto load_complete;
            case 0x3c8: case 0x3d8: case 0x3e8: case 0x3f8:
                val = (uint16_t)(nsp->nqueue[(addr >> 4) - 0x3c].tail);
                goto load_complete;
            default:
                goto data_access_exception;
            }
        }

        DBGMONDO( lprintf(sp->gid, "ASI_QUEUE store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            asi, addr, val, sp->pc); );
        RSVD_MASK(sp, MASK64(17, 6), val, asi, addr);
        switch (addr) {         /* assumed head/tail decode */
        case 0x3c0: case 0x3d0: case 0x3e0: case 0x3f0:
            nsp->nqueue[(addr >> 4) - 0x3c].head = (uint16_t)val;
            nsp->flag_queue_irq[(addr >> 4) - 0x3c] =
                nsp->nqueue[(addr >> 4) - 0x3c].head != nsp->nqueue[(addr >> 4) - 0x3c].tail;
            break;
        case 0x3c8: case 0x3d8: case 0x3e8: case 0x3f8:
            if (v9p->state != V9_HyperPriv &&
                v9p->state != V9_RED)   /* assumed second operand */
                goto data_access_exception; /* DAX if store to tail in privileged mode */
            nsp->nqueue[(addr >> 4) - 0x3c].tail = (uint16_t)val;
            nsp->flag_queue_irq[(addr >> 4) - 0x3c] =
                nsp->nqueue[(addr >> 4) - 0x3c].head != nsp->nqueue[(addr >> 4) - 0x3c].tail;
            break;
        default:
            goto data_access_exception;
        }
        break;
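        /*
         * Queue index arithmetic (worked example): addr 0x3c0 gives
         * (0x3c0 >> 4) - 0x3c = 0, the CPU mondo queue, while addr 0x3f8
         * gives (0x3f8 >> 4) - 0x3c = 3, the nonresumable error queue;
         * the irq flag for a queue is raised whenever head != tail.
         */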
#if INTERNAL_BUILD /* { */
    case SS_ASI_STREAM_MA:      /* assumed label for ASI 0x40 */
        /*
         * 0x40 RW 0  N CWQ HEAD
         * 0x40 RW 8  N CWQ Tail
         * 0x40 RW 10 N CWQ First
         * 0x40 RW 18 N CWQ Last
         * 0x40 RW 20 N CWQ CSR
         * 0x40 RW 28 N CWQ CSR_ENABLE
         * 0x40 RW 30 N CWQ SYNC
         * 0x40 RW 80 N MA CONTROL
         * 0x40 RW 88 N MA PHYS ADDR
         * 0x40 RW 90 N MA ADDR (8 elements)
         * 0x40 RW 98 N MA NP REG
         * 0x40 RW a0 N MA SYNC
         */
        if ((op & MA_Op_Mask) == MA_St) {
            int rv;             /* assumed result variable */

#ifdef LOG_ASI_STREAM_MA_ACCESSES
            EXEC_WARNING(("Got a store to the MA_STREAM ASI, "
                "addr=0x%llx, value=0x%llx\n", addr, val));
#endif
            if (size != MA_Size64) {    /* assumed guard */
                EXEC_WARNING(("Store MA size not 64 bits: "
                    "pc=%p ASI: %x addr: %p, "
                    "mod_arith \n", sp->pc, asi, addr));
                goto data_access_exception;
            }
            switch (addr) {     /* assumed register decode */
            case 0x0:
                rv = set_CWQ_head_reg(sp, val);
                break;
            case 0x8:
                rv = set_CWQ_tail_reg(sp, val);
                break;
            case 0x10:
                rv = set_CWQ_first_reg(sp, val);
                break;
            case 0x18:
                rv = set_CWQ_last_reg(sp, val);
                break;
            case 0x20:
                /* set HWE, PrE, and enable bits */
                rv = set_CWQ_CSR_reg(sp, val, 0xdULL);
                break;
            case 0x28:
                /* set only enable bit */
                rv = set_CWQ_CSR_reg(sp, val, 0x1ULL);
                break;
            /* 30 is CWQ_sync_reg, illegal */
            case 0x80:
                rv = set_MA_ctl_reg(sp, val);
                break;
            case 0x88:
                rv = set_MA_physad_reg(sp, val);
                break;
            case 0x90:
                rv = set_MA_memad_reg(sp, val);
                break;
            case 0x98:
                rv = set_MA_nprime_reg(sp, val);
                break;
            /* a0 is MA SYNC REG, illegal */
            default:
                EXEC_WARNING(("DAX in ASI_STREAM_MA\n"));
                goto data_access_exception;
            }
            switch (rv) {       /* assumed result decode */
            case MA_STREAM_MEM_ALIGN_TRAP:
                EXEC_WARNING(("Mem align error in "
                    "ASI_STREAM_MA store\n"));
                goto data_access_exception;
            case MA_STREAM_DATA_ACCESS_EX_TRAP:
                EXEC_WARNING(("DAX in ASI_STREAM_MA store\n"));
                goto data_access_exception;
            case MA_STREAM_ILLEGAL_INST_TRAP:
                v9p->post_precise_trap(sp,
                    Sparcv9_trap_illegal_instruction);
                return;
            default:
                FIXME_WARNING(("invalid return code from MA "
                    "register store\n"));   /* assumed string tail */
                EXEC_WARNING(("fatal error during MA register "
                    "store\n"));            /* assumed string tail */
                fatal("fatal error during MA register store");
            }
        } else if ((op & MA_Op_Mask) == MA_Ld ||
            (op & MA_Op_Mask) == MA_LdSigned) {
            int rv;             /* assumed result variable */

#ifdef LOG_ASI_STREAM_MA_ACCESSES
            EXEC_WARNING(("Got a load from the MA_STREAM ASI, "
                "addr=0x%llx\n", addr));    /* assumed string tail */
#endif
            if (size != MA_Size64) {    /* assumed guard */
                EXEC_WARNING(("Load from ASI_STREAM_MA, "
                    "pc=%p ASI: %x addr: %p, mod_arith \n",
                    sp->pc, asi, addr));    /* assumed argument list */
                goto data_access_exception;
            }
            switch (addr) {     /* assumed register decode */
            case 0x0:
                rv = query_CWQ_head_reg(sp, &val);
                break;
            case 0x8:
                rv = query_CWQ_tail_reg(sp, &val);
                break;
            case 0x10:
                rv = query_CWQ_first_reg(sp, &val);
                break;
            case 0x18:
                rv = query_CWQ_last_reg(sp, &val);
                break;
            case 0x20:
                rv = query_CWQ_CSR_reg(sp, &val);
                break;
            /* note: no 0x28, as CSR.enable is write only */
            case 0x30:
                rv = query_CWQ_sync_reg(sp, &val);
                break;
            case 0x80:
                rv = query_MA_ctl_reg(sp, &val);
                break;
            case 0x88:
                rv = query_MA_physad_reg(sp, &val);
                break;
            case 0x90:
                rv = query_MA_memad_reg(sp, &val);
                break;
            case 0x98:
                rv = query_MA_nprime_reg(sp, &val);
                break;
            case 0xa0:
                rv = query_MA_sync_reg(sp, &val);
                break;
            default:
                EXEC_WARNING(("DAX in ASI_STREAM_MA load\n"));
                goto data_access_exception;
            }
            switch (rv) {       /* assumed result decode */
            case MA_STREAM_LD_COMPLETE:
#if LOG_ASI_STREAM_MA_ACCESSES
                EXEC_WARNING(("Got a load from the "
                    "MA_STREAM ASI, "
                    "addr=0x%llx, returned 0x%llx\n",
                    addr, val));    /* assumed argument list */
#endif
                goto load_complete;
            case MA_STREAM_MEM_ALIGN_TRAP:
                EXEC_WARNING(("Alignment error in "
                    "ASI_STREAM_MA store\n"));
                goto data_access_exception;
            case MA_STREAM_DATA_ACCESS_EX_TRAP:
                EXEC_WARNING(("DAX in ASI_STREAM_MA\n"));
                goto data_access_exception;
            case MA_STREAM_ILLEGAL_INST_TRAP:
                v9p->post_precise_trap(sp,
                    Sparcv9_trap_illegal_instruction);
                return;
            default:
                IMPL_WARNING(("fatal error during MA register "
                    "load\n"));     /* assumed string tail */
                fatal("fatal error during ASI_STREAM_MA "
                    "load");        /* assumed string tail */
                fatal("Unexpected rv during ASI_STREAM_MA "
                    "load");        /* assumed string tail */
                return; /* control won't actually get here */
            }
        } else {
            EXEC_WARNING(("Illegal memory operation 0x%x to "
                "STREAM ASI pc=%p ASI: %x addr: %p, "
                "mod_arith \n", op, sp->pc, asi, addr));
            goto data_access_exception;
        }
#endif /* INTERNAL_BUILD } */
    case SS_ASI_CMP:            /* 0x41 R 0 S Core Available */
        /*
         * 0x41 R  10 S Core Enable Status
         * 0x41 RW 20 S Core Enable
         * 0x41 RW 30 S XIR Steering
         * 0x41 RW 38 S Tick Enable
         * 0x41 RW 40 S Error Steering, not implemented
         * 0x41 RW 50 S Core Running RW
         * 0x41 R  58 S Core Running Status
         * 0x41 W  60 S Core Running W1S
         * 0x41 W  68 S Core Running W1C
         */
        if (is_load) {
            switch (addr) {     /* assumed decode */
            case 0x0:
            case 0x10:
                val = npp->cmp_regs.core_enable_status;
                goto load_complete;
            case 0x30:
                IMPL_WARNING(("asi_xir_steering (asi: 0x%lx va: 0x%lx) not implemented\n", asi, addr));
                val = 0;        /* assumed placeholder value */
                goto load_complete;
            case 0x38:
                val = npp->cmp_regs.tick_enable ? 1 : 0;
                goto load_complete;
            case 0x50:
            case 0x58:
                val = npp->cmp_regs.core_running_status;
                goto load_complete;
            case 0x60:
            case 0x68:
                /*
                 * ASI_CORE_RUNNING_{W1S, W1C}, write-only
                 */
                goto data_access_exception;
            default:
                goto data_access_exception;
            }
        }

        switch (addr) {         /* assumed decode */
        case 0x20:
            IMPL_WARNING(("asi_core_enable: (asi: 0x%lx va: 0x%lx) not supported\n", asi, addr));
            break;
        case 0x30:
            IMPL_WARNING(("asi_xir_steering (asi: 0x%lx va: 0x%lx) not implemented\n", asi, addr));
            break;
        case 0x38:
        {
            ss_strand_t *tnsp;          /* assumed per-strand locals */
            sparcv9_cpu_t *tv9p;
            simcpu_t *tsp;

            /* For multinode systems, should not be using ASI_CMP_TICK_ENABLE */
            if (sp->config_procp->domainp->procs.count > 1)
                EXEC_WARNING(("For multinode systems, use of ASI_CMP_TICK_ENABLE(asi: 0x%lx va: 0x%lx)\n"
                    "is not recommended for tick sync purposes(VF PRM 0.1 Sec 3.1.1).\n",
                    asi, addr));        /* assumed argument list */
            RSVD_MASK(sp, MASK64(0, 0), val, asi, addr);
            pthread_mutex_lock(&npp->tick_en_lock);
            if (!npp->ncxp->tick_en_slow) { /* assumed guard scope */
                if (!val && !npp->tick_stop) {
                    /* now stop all tick counters */
                    for (idx = 0; idx < npp->nstrands; idx++) {
                        tnsp = &(npp->ss_strandp[idx]);
                        if (tnsp->core != core_num) {
                            /* (tv9p/tsp bindings for strand idx are elided in this extract) */
                            tv9p->tick->offset += RAW_TICK(tv9p);
                            ss_recomp_tick_target(tsp);
                        }
                    }
                }
                if (val && npp->tick_stop) {
                    /* now start all tick counters */
                    for (idx = 0; idx < npp->nstrands; idx++) {
                        tnsp = &(npp->ss_strandp[idx]);
                        if (tnsp->core != core_num) {
                            /* (tv9p/tsp bindings for strand idx are elided in this extract) */
                            tv9p->tick->offset -= RAW_TICK(tv9p);
                            ss_recomp_tick_target(tsp);
                        }
                    }
                }
            }
            npp->cmp_regs.tick_enable = val ? true : false;
            pthread_mutex_unlock(&npp->tick_en_lock);
            break;
        }
        case 0x50:
            /*
             * WS: according to the CMP PRM, writing a '1' to a bit will be ignored
             * if the corresponding bit in the core enable reg is 0 (i.e., the
             * corresponding virtual core is not enabled)
             */
            pthread_mutex_lock(&npp->cmp_lock);
            npp->cmp_regs.core_running_status = val & npp->cmp_regs.core_enable_status;
            ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
            pthread_mutex_unlock(&npp->cmp_lock);
            break;
        case 0x60:
            /*
             * W1S: new_value = old_value | new_value;
             */
            pthread_mutex_lock(&npp->cmp_lock);
            npp->cmp_regs.core_running_status |= val;
            /*
             * According to the CMP PRM, writing a '1' to a bit will be ignored
             * if the corresponding bit in the core enable reg is 0 (i.e., the
             * corresponding virtual core is not enabled)
             */
            npp->cmp_regs.core_running_status &= npp->cmp_regs.core_enable_status;
            /*
             * FIXME: need to check if the virtual core is attempting to park
             * all the virtual cores (this is prevented by the hardware)
             */
            ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
            pthread_mutex_unlock(&npp->cmp_lock);
            break;
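            /*
             * W1S example: with core_running_status = 0b0101, val = 0b0011
             * and core_enable_status = 0b0111, the OR gives 0b0111 and the
             * enable mask leaves 0b0111, so cores 0-2 end up running.
             */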
        case 0x68:
            /*
             * W1C: new_value = old_value & ~new_value;
             */
            pthread_mutex_lock(&npp->cmp_lock);
            npp->cmp_regs.core_running_status &= ~val;
            ss_change_exec_state(npp, npp->cmp_regs.core_running_status);
            pthread_mutex_unlock(&npp->cmp_lock);
            break;
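            /*
             * W1C example: core_running_status = 0b0111 with val = 0b0010
             * clears only core 1, leaving 0b0101 - writing 1s parks the
             * selected virtual cores and 0 bits are untouched.
             */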
        default:
            goto data_access_exception;
        }
        break;
    case SS_ASI_LSU_DIAG_REG:   /* 0x42 RW 0 N Sparc BIST control register */   /* SPARC_BIST_CONTROL */
                                /* 0x42 RW 8 N Sparc Instruction Mask Register */ /* INST_MASK_REG */
                                /* 0x42 RW 10 N Load/Store Unit Diagnostic Register */ /* LSU_DIAG_REG */
        if (is_load) {
            switch (addr) {     /* assumed decode */
            case 0x0:
                val = nsp->icachep->bist_ctl;
                goto load_complete;
            case 0x8:
                val = nsp->icachep->inst_mask;
                goto load_complete;
            case 0x10:
                val = (nsp->dcachep->assocdis ? 2 : 0) |
                    (nsp->icachep->assocdis ? 1 : 0);
                goto load_complete;
            default:
                goto data_access_exception;
            }
        }
        switch (addr) {         /* assumed decode */
        case 0x0:
            nsp->icachep->bist_ctl = val & 0x7f;
            if (val & 1) nsp->icachep->bist_ctl |= 0x400;
            break;
        case 0x8:
            nsp->icachep->inst_mask = val;
            break;
        case 0x10:
            if (val & 2) nsp->dcachep->assocdis = true;
            if (val & 1) nsp->icachep->assocdis = true;
            break;
        default:
            goto data_access_exception;
        }
        break;
    case SS_ASI_ERROR_INJECT_REG: /* 0x43 RW 0 N Sparc Error Injection Register */
        if (is_load || addr != 0)   /* assumed guard */
            goto data_access_exception;
        /* TODO: provide per-core field to store this */
        if ((val & BIT(31)) != 0)
            IMPL_WARNING(("ASI_ERROR_INJECT_REG not "
                "implemented (pc=0x%llx)", sp->pc));
        break;
    case SS_ASI_LSU_CONTROL_REG: /* 0x45 RW 0 Y Load/Store Unit Control Register */
        switch (addr) {         /* assumed decode */
        case 0x0:
            if (is_load) {
                val = (nsp->lsu_control_raw & ~(LSU_CTRL_DMMU_EN | LSU_CTRL_IMMU_EN)) |
                    (nsp->dmmu.enabled ? LSU_CTRL_DMMU_EN : 0LL) |
                    (nsp->immu.enabled ? LSU_CTRL_IMMU_EN : 0LL);
                goto load_complete;
            }
            /*
             * can only issue this in hpriv mode, so even though we turn the mmu
             * on and off, we dont need to flush the x and d translation caches
             * because in hpriv mode we're only fetching physical addressses.
             */
            ASSERT(V9_RED == v9p->state || V9_HyperPriv == v9p->state);
            val &= LSU_CTRL_REG_MASK;
            if ((val & (LSU_CTRL_WATCH_RE | LSU_CTRL_WATCH_WE)) != 0) {
                IMPL_WARNING(("ASI_LSU_CONTROL_REG watchpoint enable unimplemented @ pc=%lx\n", sp->pc));
            }
            nsp->lsu_control_raw = val;
            nsp->dmmu.enabled = (val & LSU_CTRL_DMMU_EN) != 0;
            nsp->immu.enabled = (val & LSU_CTRL_IMMU_EN) != 0;
            sp->xicache_trans_flush_pending = true;
            sp->xdcache_trans_flush_pending = true;
            break;
        case 0x8:   /* 0x45 RW 8 N ASI_DECR */
        case 0x18:  /* 0x45 RW 18 N ASI_RST_VEC_MASK */
            ITODO(SS_ASI_LSU_CONTROL_REG);
            break;
        default:
            goto data_access_exception;
        }
        break;
    case SS_ASI_DCACHE_DATA:    /* 0x46 RW - N Dcache data array diagnostics access */
    {
        uint64_t idx64, lineword, dtag;     /* renamed locals to avoid shadowing */

        if (is_load) {
            /* L1 D-Cache Diagnostic Access Section 28.6 of N2 PRM 1.1 */
            lineword = addr & SS_DCACHE_DATA_BITS;
            dtag = (addr & SS_DCACHE_DATA_TAG_BITS) >> 10;

            RW_rdlock(&nsp->dcachep->rwlock);
            /*
             * must match tag to load data
             * iterate over 4 ways at bits [12:11]
             */
            for (idx64 = lineword + 0x1800; idx64 >= lineword; idx64 -= 0x800) {
                if (nsp->dcachep->tagp[idx64] == dtag) {
                    val = nsp->dcachep->datap[idx64];
                    break;
                }
            }
            if (idx64 < lineword)   /* assumed no-match detection */
                EXEC_WARNING( ("ASI_DCACHE_DATA load tag 0x%xll has no match",
                    addr & SS_DCACHE_DATA_TAG_BITS) );
            RW_unlock(&nsp->dcachep->rwlock);
            goto load_complete;
        }

        /* L1 D-Cache Diagnostic Access Section 28.6 of N2 PRM 1.1 */
        idx64 = (addr & SS_DCACHE_DATA_BITS) >> 3;
        RW_wrlock(&nsp->dcachep->rwlock);
        nsp->dcachep->datap[idx64] = val;
        RW_unlock(&nsp->dcachep->rwlock);
        break;
    }
    case SS_ASI_DCACHE_TAG:     /* 0x47 RW - N Dcache tag and valid bit diagnostics access */
    {
        uint64_t idx64;         /* assumed local */

        if (is_load) {
            /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
            idx64 = (addr & SS_DCACHE_TAG_WAYLINE_BITS) >> 4;
            RW_rdlock(&nsp->dcachep->rwlock);
            val = nsp->dcachep->tagp[idx64];
            RW_unlock(&nsp->dcachep->rwlock);
            goto load_complete;
        }
        /* L1 I-Cache Diagnostic Access Section 18.3 of PRM 1.2 */
        idx64 = (addr & SS_DCACHE_TAG_WAYLINE_BITS) >> 4;
        RW_wrlock(&nsp->dcachep->rwlock);
        nsp->dcachep->tagp[idx64] = val;
        RW_unlock(&nsp->dcachep->rwlock);
        break;
    }
    case N2_ASI_IRF_ECC_REG:    /* 0x48 RO 0-F8 Y IRF ECC diagnostic access */
        if (!is_load) goto data_access_exception;
        break;      /* assumed noop read - diagnostic value handling elided */

    case N2_ASI_FRF_ECC_REG:    /* 0x49 RO 0-F8 Y FRF ECC diagnostic access */
        if (!is_load) goto data_access_exception;
        break;      /* assumed noop read - diagnostic value handling elided */

    case N2_ASI_STB_ACCESS:     /* 0x4A RO 0-1F8 Y Store buffer diagnostic access */
        if (!is_load) goto data_access_exception;
        break;      /* assumed noop read - diagnostic value handling elided */

    case N2_ASI_DESR:           /* 0x4C R  0  Y Disrupting error status
                                 *      R  8  Y Deferred error status
                                 *      RW 10 N Core error reporting enable
                                 *      RW 18 Y Core error trap enable
                                 *      R  20 Y Core local error status
                                 *      R  28 Y Core local error status */
        /* handled by ss_error_asi_noop_access */
        goto data_access_exception;
    case N2_ASI_SPACE_PWR_MGMT: /* 0x4E RW 0 Y Sparc power management */
        if (addr != 0)          /* assumed guard for the DAX below */
            goto data_access_exception;
        if (is_load) {
            val = npp->sparc_power_mgmtp[core_num];
            goto load_complete;
        }
        npp->sparc_power_mgmtp[core_num] = (val & MASK64(15, 0));
        break;
    case SS_ASI_HYP_SCRATCHPAD:
        /*
         * 0x4F RW 0-38 Y Hypervisor Scratchpad
         * 0x4F RW 0-18 Y Hypervisor Scratchpad
         */
        if (INVALID_HYP_SCRATCHPAD(addr)) {
            goto data_access_exception;
        }
        valp = &(nsp->strand_reg[SSR_HSCRATCHPAD_INDEX + (addr >> 3)]);
        if (is_load) {          /* assumed load path */
            val = *valp;
            goto load_complete;
        }
        DBGSCRATCH( if (*valp != val)
            lprintf(sp->gid, "SCRATCH store 0x%x/0x%llx: "
            "0x%llx -> 0x%llx pc=0x%llx\n",
            asi, addr, *valp, val, sp->pc); );
        *valp = val;            /* assumed store */
        break;
    case SS_ASI_IMMU:           /* 0x50 R  0  Y IMMU Tag Target register */
                                /* 0x50 RW 18 Y IMMU Synchronous Fault Status Register */
                                /* 0x50 RW 30 Y IMMU TLB Tag Access Register */
                                /* 0x50 RW 38 Y IMMU VA Data Watchpoint Register */
        mmup = &(nsp->immu);    /* assumed binding */
        if (is_load) {
            switch (addr) {     /* assumed decode */
            case 0x0:
                val = (mmup->tag_access_reg >> 22) |
                    ((mmup->tag_access_reg & MASK64(12, 0)) << 48);
                DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
                    mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
                goto load_complete;
            case 0x18:
                val = nsp->error.isfsr; /* assumed - the store path below writes isfsr */
                DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
                    mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
                goto load_complete;
            case 0x30:
                val = mmup->tag_access_reg;
                DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
                    mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
                goto load_complete;
            default:
                goto data_access_exception;
            }
        }
        DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
        switch (addr) {         /* assumed decode */
        case 0x18:
            nsp->error.isfsr = val & MMU_SFSR_MASK;
            break;
        case 0x30:
            sp->xicache_trans_flush_pending = true;
            mmup->tag_access_reg = VA48(val);
            break;
        case 0x38:
            mmup->watchpoint = VA48(val);
            break;
        default:
            goto data_access_exception;
        }
        break;
    case N2_ASI_MRA_ACCESS:    /* 0x51 RO 0-38 Y HWTW MRA Access */
        if (!is_load) goto data_access_exception;
        goto unimplemented_asi;

    case N2_ASI_MMU_REAL_RANGE:    /* 0x52 RW 108-120 Y MMU TSB Real Range
                     *      RW 208-220 Y MMU TSB Physical Offset */
        if (is_load) {
            if (addr >= 0x108 && addr <= 0x120) {
                idx = ((addr - 0x108) >> 3) & 0x3;
                val = nsp->real_range_reg[idx];
                break;
            }
            if (addr >= 0x208 && addr <= 0x220) {
                idx = ((addr - 0x208) >> 3) & 0x3;
                val = nsp->phy_off_reg[idx];
                break;
            }
            goto data_access_exception;
        }
        DBGMMU( lprintf(sp->gid, "MMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            asi, addr, val, sp->pc); );
        if (addr >= 0x108 && addr <= 0x120) {
            idx = ((addr - 0x108) >> 3) & 0x3;
            nsp->real_range_reg[idx] = val;
            break;
        }
        if (addr >= 0x208 && addr <= 0x220) {
            idx = ((addr - 0x208) >> 3) & 0x3;
            nsp->phy_off_reg[idx] = val;
            break;
        }
        goto data_access_exception;

    case N2_ITLB_PROBE:    /* 0x53 R 0 N ITLB Probe */
        if (!is_load) goto data_access_exception;
        goto unimplemented_asi;
    case SS_ASI_ITLB_DATA_IN_REG:    /* 0x54 W 0,400 N IMMU data in register */
                    /* 0x54 RW 10-28 N Ctxt zero TSB config 0-3 register */
                    /* 0x54 RW 30-40 N Ctxt nonzero TSB config 0-3 register */
                    /* 0x54 R  50-68 N ITSB pointer 0-3 register */
                    /* 0x54 R  70-88 N DTSB pointer 0-3 register */
        if (is_load) {
            if (addr >= 0x10 && addr <= 0x28) {
                idx = ((addr - 0x10) >> 3) & 0x3;
                val = nsp->mmu_zero_ctxt_tsb_config[idx].data;
                break;
            }
            if (addr >= 0x30 && addr <= 0x40) {
                idx = ((addr - 0x30) >> 3) & 0x3;
                val = nsp->mmu_nonzero_ctxt_tsb_config[idx].data;
                break;
            }
            if (addr >= 0x50 && addr <= 0x68) {
                /* ITSB pointer: context selects which TSB config to use */
                idx = ((addr - 0x50) >> 3) & 0x3;
                if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
                    tsbinfop = &nsp->mmu_zero_ctxt_tsb_config[idx];
                else
                    tsbinfop = &nsp->mmu_nonzero_ctxt_tsb_config[idx];
                val = ss_make_tsb_pointer(mmup->tag_access_reg, tsbinfop);
                DBGMMU( lprintf(sp->gid,
                    "MMU ASI load 0x%x/0x%llx : 0x%llx (%cTSB PTR%d) (pc=0x%llx)\n",
                    asi, addr, val, mmup->is_immu ? 'I' : 'D', idx, sp->pc); );
                break;
            }
            if (addr >= 0x70 && addr <= 0x88) {
                /* DTSB pointer */
                idx = ((addr - 0x70) >> 3) & 0x3;
                if ((mmup->tag_access_reg & MASK64(12,0)) == 0)
                    tsbinfop = &nsp->mmu_zero_ctxt_tsb_config[idx];
                else
                    tsbinfop = &nsp->mmu_nonzero_ctxt_tsb_config[idx];
                val = ss_make_tsb_pointer(mmup->tag_access_reg, tsbinfop);
                DBGMMU( lprintf(sp->gid,
                    "MMU ASI load 0x%x/0x%llx : 0x%llx (%cTSB PTR%d) (pc=0x%llx)\n",
                    asi, addr, val, mmup->is_immu ? 'I' : 'D', idx, sp->pc); );
                break;
            }
            goto data_access_exception;
        }

        DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

        if ((addr & ~SS_TLB_REAL_MASK) == 0) {
            uint64_t pa_offset_unused = 0;

            /*
             * addr=0x0: 'Real' bit<10> = 0, load VA->PA translation into TLB
             * addr=0x400: 'Real' bit<10> = 1, load RA->PA translation into TLB
             */
            is_real = SS_TLB_IS_REAL(addr);
            if (ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val,
                &flags_unused, &pa_offset_unused) == SS_trap_NONE)
                break;
            goto data_access_exception;
        }
        if (addr >= 0x10 && addr <= 0x28) {
            idx = ((addr - 0x10) >> 3) & 0x3;
            tsbinfop = &nsp->mmu_zero_ctxt_tsb_config[idx];
            niagara2_write_tsb_config(sp, tsbinfop, val);
            break;
        }
        if (addr >= 0x30 && addr <= 0x40) {
            idx = ((addr - 0x30) >> 3) & 0x3;
            tsbinfop = &nsp->mmu_nonzero_ctxt_tsb_config[idx];
            niagara2_write_tsb_config(sp, tsbinfop, val);
            break;
        }
        goto data_access_exception;
    case SS_ASI_ITLB_DATA_ACCESS_REG:    /* 0x55 RW 0-1F8,400-5F8 N IMMU TLB Data Access Register */
    case SS_ASI_DTLB_DATA_ACCESS_REG:    /* 0x5D RW 0-7F8 N DMMU TLB Data Access Register */
        idx = (addr >> 3) & 0x7f;
        if (idx >= tlbp->nentries) goto data_access_exception;
        if (is_load) {
            if (tlb_data_access_error_match(sp, mmup, idx))
                break;
            RW_rdlock(&tlbp->rwlock);
            tep = &tlbp->tlb_entryp[idx];
            val = tep->data;
            RW_unlock(&tlbp->rwlock);
            DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
                mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
            break;
        }
        is_real = SS_TLB_IS_REAL(addr);
        if (ss_tlb_insert_idx(sp, mmup, tlbp, nsp->partid,
            is_real, val, idx) == SS_trap_NONE)
            break;
        goto data_access_exception;

    case SS_ASI_ITLB_TAG_READ_REG:    /* 0x56 R 0-1F8,400-5F8 N IMMU TLB Tag Read Register */
    case SS_ASI_DTLB_TAG_READ_REG:    /* 0x5E R 0-7F8 N DMMU TLB Tag Read Register */
        if (!is_load) goto data_access_exception;
        idx = (addr >> 3) & 0x7f;
        if (idx >= tlbp->nentries) goto data_access_exception;
        if (tlb_tag_access_error_match(sp, mmup, idx))
            break;
        RW_rdlock(&tlbp->rwlock);
        tep = &tlbp->tlb_entryp[idx];
        val = ((uint64_t)tep->partid << 61) |
            ((uint64_t)(tep->is_real ? 1 : 0) << 60);
        val |= (tep->tag_pfn & MASK64(47, 13)) | (uint64_t)tep->tag_context;
        /* TODO: Return Parity and Used bits when implemented. */
        RW_unlock(&tlbp->rwlock);
        DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
        break;
    case SS_ASI_IMMU_DEMAP:    /* 0x57 W 0 Y IMMU TLB Demap */
    case SS_ASI_DMMU_DEMAP:    /* 0x5F W 0 Y DMMU TLB Demap */
        {
        ss_demap_t op;
        bool_t noop = false;

        if (is_load) goto data_access_exception;
        DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );

        op = (ss_demap_t)((addr >> 6) & 0x3);

        switch ((addr >> 4) & 0x3) {
        case 0x0:    /* primary context */
            context = nsp->pri_context;
            break;
        case 0x1:    /* secondary context */
            context = nsp->sec_context;
            /*
             * immu doesn't support secondary context encoding for
             * demap page and demap context ops (causing demap to be ignored)
             */
            if ((mmup->is_immu) &&
                (op == NA_demap_page || op == NA_demap_context))
                noop = true;
            break;
        case 0x2:    /* nucleus context */
            context = SS_NUCLEUS_CONTEXT;
            break;
        default:
            /*
             * use of reserved value is valid but causes
             * demap to be ignored for the following two ops
             */
            context = 0;
            if (op == NA_demap_page || op == NA_demap_context)
                noop = true;
            break;
        }

        if (noop) {
            EXEC_WARNING(("(@pc=0x%llx) demap noop "
                "asi=0x%x va=0x%llx", sp->pc, asi, addr));
            break;
        }

        if (op == NA_demap_page) {
            if ((addr & BIT(47)) == 0) {
                if ((addr & MASK64(63, 48)) != 0) {
                    EXEC_WARNING(("(@pc=0x%llx) demap "
                        "address not sign extended: va=0x%llx",
                        sp->pc, addr));
                }
            } else {
                if ((addr & MASK64(63, 48)) != MASK64(63, 48)) {
                    EXEC_WARNING(("(@pc=0x%llx) demap "
                        "address not sign extended: va=0x%llx",
                        sp->pc, addr));
                }
            }
        }

        is_real = SS_TLB_IS_REAL(addr);
        if (!ss_demap(sp, op, mmup, tlbp, nsp->partid, is_real, context, addr))
            goto data_access_exception;
        break;
        }
    case SS_ASI_DMMU:    /* 0x58 R  0  Y D-MMU Tag Target Register */
                /* 0x58 RW 18 Y DMMU Synchronous Fault Status Register */
                /* 0x58 R  20 Y DMMU Synchronous Fault Address Register */
                /* 0x58 RW 30 Y DMMU TLB Tag Access Register */
                /* 0x58 RW 38 Y DMMU VA Data Watchpoint Register */
                /* 0x58 RW 40 Y Niagara 2: Tablewalk Config Reg */
                /* 0x58 RW 80 Y I/DMMU Partition ID */
        if (is_load) {
            switch (addr) {
            case 0x0:    /* Tag Target */
                val = (mmup->tag_access_reg >> 22) |
                    ((mmup->tag_access_reg & MASK64(12,0)) << 48);
                break;
            case 0x18:    /* SFSR */
                val = nsp->error.dsfsr;
                break;
            case 0x30:    /* Tag Access */
                val = mmup->tag_access_reg;
                break;
            case 0x38:    /* VA watchpoint */
                val = mmup->watchpoint;
                break;
            case 0x80:    /* Partition ID */
                val = (uint64_t)(nsp->partid);
                break;
            default:
                goto data_access_exception;
            }
            DBGMMU( lprintf(sp->gid, "%cMMU ASI load 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
                mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
            break;
        }

        DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
        switch (addr) {
        case 0x18:    /* SFSR */
            nsp->error.dsfsr = val & MMU_SFSR_MASK;
            break;
        case 0x30:    /* Tag Access */
            mmup->tag_access_reg = VA48(val);
            break;
        case 0x38:    /* VA watchpoint */
            mmup->watchpoint = VA48(val);
            break;
        case 0x40:    /* Tablewalk Config */
            RSVD_MASK(sp, MASK64(1,0), val, asi, addr);
            IMPL_WARNING(("Unimplemented hwtw config control bits 0x%x\n", val));
            break;
        case 0x80:    /* Partition ID */
            /*
             * can only do in hypervisor mode - switching mode causes the xi/xd
             * translation caches to be flushed
             */
            nsp->partid = val & 0x7;    /* three bits of part id only */
            sp->xicache_trans_flush_pending = true;
            sp->xdcache_trans_flush_pending = true;
            break;
        default:
            goto data_access_exception;
        }
        break;

    case N2_SCRATCHPAD_ACCESS:    /* 0x59 RO 0-78 Y Scratchpad Register Diagnostic Access */
        if (!is_load) goto data_access_exception;
        goto unimplemented_asi;

    case N2_TICK_ACCESS:    /* 0x5A RO 0-8,10,20-30 Y Tick Register Diagnostic Access */
        if (!is_load) goto data_access_exception;
        goto unimplemented_asi;

    case N2_TSA_ACCESS:    /* 0x5B RO 0-38 Y TSA Diagnostic Access */
        if (!is_load) goto data_access_exception;
        goto unimplemented_asi;
    case SS_ASI_DTLB_DATA_IN_REG:    /* 0x5C W 0 N DMMU data in register */
        {
        uint64_t pa_offset_unused;

        if (is_load || (addr & ~SS_TLB_REAL_MASK) != 0)
            goto data_access_exception;
        DBGMMU( lprintf(sp->gid, "%cMMU ASI store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            mmup->is_immu ? 'I' : 'D', asi, addr, val, sp->pc); );
        is_real = SS_TLB_IS_REAL(addr);
        if (ss_tlb_insert(sp, mmup, tlbp, nsp->partid, is_real, val,
            &flags_unused, &pa_offset_unused) == SS_trap_NONE)
            break;
        goto data_access_exception;
        }

    /*
     * The DMMU TLB data access, tag read and demap ASIs (0x5D, 0x5E
     * and 0x5F) are handled together with their IMMU counterparts above.
     */

    case SS_ASI_CMP_CORE_INTR_ID:    /* 0x63 R 0  Y Core Interrupt ID */
                    /* 0x63 R 10 Y Core ID */
        if (!is_load) goto data_access_exception;
        switch (addr) {
        case 0x0:
            val = nsp->vcore_id;
            break;
        case 0x10:
            val = ((uint64_t)(STRANDSPERCORE - 1) << 32) |
                ((STRANDS_PER_CHIP - 1) << 16) |
                nsp->vcore_id;
            break;
        default:
            goto data_access_exception;
        }
        break;
    case SS_ASI_ICACHE_INSTR:    /* 0x66 RW - N Icache data array diagnostics access */
        /* L1 I-Cache Diagnostic Access Section 28.5 of N2 PRM 1.1 */
        idx = ((addr & SS_ICACHE_DATA_LINEWORD_BITS) |
            ((addr & SS_ICACHE_DATA_WAY_BITS) >> 3)) >> 3;
        if (is_load) {
            RW_rdlock(&nsp->icachep->rwlock);
            val = nsp->icachep->datap[idx];
            RW_unlock(&nsp->icachep->rwlock);
        } else {
            RW_wrlock(&nsp->icachep->rwlock);
            nsp->icachep->datap[idx] = val;
            RW_unlock(&nsp->icachep->rwlock);
        }
        break;

    case SS_ASI_ICACHE_TAG:    /* 0x67 RW - N Icache tag and valid bit diagnostics access */
        /* L1 I-Cache Diagnostic Access Section 28.5 of N2 PRM 1.1 */
        idx = (((addr & SS_ICACHE_TAG_LINE_BITS) >> 3) |
            ((addr & SS_ICACHE_TAG_WAY_BITS) >> 6)) >> 3;
        if (is_load) {
            RW_rdlock(&nsp->icachep->rwlock);
            val = nsp->icachep->tagp[idx];
            RW_unlock(&nsp->icachep->rwlock);
        } else {
            RW_wrlock(&nsp->icachep->rwlock);
            nsp->icachep->tagp[idx] = val;
            RW_unlock(&nsp->icachep->rwlock);
        }
        break;
    case N2_ASI_INTR_RECEIVE:    /* 0x72 RW 0 Y Interrupt Receive Register */
        if (0LL != addr) goto data_access_exception;
        if (is_load) {
            /* pthread_mutex_lock(&nsp->irq_lock); */
            val = nsp->irq_vector;
            /* pthread_mutex_unlock(&nsp->irq_lock); */
        } else {
            pthread_mutex_lock(&nsp->irq_lock);
            nsp->irq_vector = val;
            pthread_mutex_unlock(&nsp->irq_lock);
            /* TODO: need to check interrupts? (bits cleared) */
        }
        break;

    case N2_ASI_INTR_W:    /* 0x73 W 0 Y Interrupt Vector Dispatch Register */
        if (0LL != addr || is_load) goto data_access_exception;
        DBGMONDO( lprintf(sp->gid, "ASI_INTR_W store 0x%x/0x%llx : 0x%llx (pc=0x%llx)\n",
            asi, addr, val, sp->pc); );
        niagara2_send_xirq(sp, npp, val);
        break;

    case N2_ASI_INTR_R:    /* 0x74 R 0 Y Incoming Vector Register */
        {
        uint64_t vec;
        uint_t bit;

        if (0LL != addr || !is_load) goto data_access_exception;

        pthread_mutex_lock(&nsp->irq_lock);
        vec = nsp->irq_vector;
        if (0LL == vec) {
            pthread_mutex_unlock(&nsp->irq_lock);
            val = 0;
            break;
        }
        /* locate, clear and return the highest numbered pending vector */
        bit = 0;
        if (vec & 0xffffffff00000000ull) { bit += 32; vec >>= 32; }
        if (vec & 0xffff0000) { bit += 16; vec >>= 16; }
        if (vec & 0xff00) { bit += 8; vec >>= 8; }
        if (vec & 0xf0) { bit += 4; vec >>= 4; }
        if (vec & 0xc) { bit += 2; vec >>= 2; }
        if (vec & 0x2) bit += 1;
        nsp->irq_vector &= ~((uint64_t)1 << bit);
        pthread_mutex_unlock(&nsp->irq_lock);
        val = bit;
        break;
        }
    default:
data_access_exception:
#if ERROR_TRAP_GEN /* { */
        /*
         * Check for error trap generation ESR related accesses.
         * Returns true if a valid ASI/VA access was made.
         */
        if (ss_error_asi_access(sp, op, regnum, asi, is_load, addr, val))
            return;
#else    /* } if not ERROR_TRAP_GEN { */
        /*
         * Even during normal execution, the hypervisor does some
         * amount of error register initialization which shouldn't
         * cause a data access exception.
         */
        if (ss_error_asi_noop_access(sp, op, regnum, asi, is_load, addr))
            return;
#endif    /* } ERROR_TRAP_GEN */
        tt = (sparcv9_trap_type_t)SS_trap_DAE_invalid_ASI;
        ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);
        v9p->post_precise_trap(sp, tt);
        return;

unimplemented_asi:
        IMPL_WARNING(("ASI access (0x%02x) (@pc=0x%llx) to address 0x%llx "
            "currently unimplemented", asi, sp->pc, addr));
        v9p->post_precise_trap(sp, Sparcv9_trap_illegal_instruction);
        return;
    }

    /*
     * The access succeeded - for loads, forward the value to its
     * destination register.
     */
    if (is_load) {
        if (MA_LdFloat != op) {
#if ERROR_TRAP_GEN /* { */
            /*
             * When Error Trap generation is turned on, we need to
             * check all ASI load operations against the list of
             * ASI/VA/value pairs provided by the user.
             * This allows very precise control over which code paths
             * are tested in the simulated SW.
             */
            ss_check_user_asi_list(sp, asi, addr, &val, true, true);
#endif    /* } ERROR_TRAP_GEN */
            if (regnum != Reg_sparcv9_g0)
                sp->intreg[regnum] = val;
        } else { /* op == MA_LdFloat */
            ASSERT(MA_LdFloat == op);
            if (MA_Size32 == size)
                sp->fpreg.s32[regnum] = val;
            else
                sp->fpreg.s64[regnum >> 1] = val;
        }
    }
}
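
/*
 * A standalone illustration (not called by the simulator) of how an MMU
 * demap store encodes its operation in the address, per the field
 * extractions in the demap case above: the demap type sits in address
 * bits 7:6 and the context selection in bits 5:4. All names here are
 * local to the example; only those two field positions are assumed.
 */
static void
example_decode_demap_addr(uint64_t addr, uint_t *demap_opp, uint_t *ctx_selp)
{
    *demap_opp = (uint_t)((addr >> 6) & 0x3);    /* e.g. NA_demap_page, NA_demap_context */
    *ctx_selp = (uint_t)((addr >> 4) & 0x3);    /* 0=primary 1=secondary 2=nucleus 3=reserved */
}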
/*
 * When ERROR_TRAP_GEN is turned off, the CPU error handling related ASI
 * accesses are treated as noops. Even during normal execution, the hypervisor
 * might do some amount of error register initialization, which shouldn't cause
 * a data access exception.
 */
static bool_t
ss_error_asi_noop_access(simcpu_t *sp, maccess_t op, uint_t regnum,
    uint_t asi, bool_t is_load, tvaddr_t addr)
{
    switch (asi) {
    case N2_ASI_DESR:        /* 0x4C error status / enable registers */
        /* (the remaining error handling ASIs are matched here as well) */
        break;
    default:
        return (false);
    }

    /*
     * Match found. Treat ASI access as noop.
     */
    DBGERR( lprintf(sp->gid, "CPU Error handling related ASI 0x%x VA 0x%llx "
        "access treated as noop.\n", asi, addr); );

    /* error register loads read as zero */
    if (is_load && (regnum != Reg_sparcv9_g0))
        sp->intreg[regnum] = 0;

    return (true);
}
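
/*
 * The match step above walks a list of error-register ASI/VA ranges. A
 * minimal sketch of one plausible shape for such a matcher follows; the
 * real list lives elsewhere in the simulator, and the type and field
 * names here are local to the example.
 */
typedef struct {
    uint_t asi;        /* ASI number */
    tvaddr_t va_lo;        /* first matched offset */
    tvaddr_t va_hi;        /* last matched offset */
} example_err_asi_range_t;

static bool_t
example_err_asi_match(const example_err_asi_range_t *tbl, uint_t n,
    uint_t asi, tvaddr_t addr)
{
    uint_t i;

    for (i = 0; i < n; i++)
        if (tbl[i].asi == asi &&
            addr >= tbl[i].va_lo && addr <= tbl[i].va_hi)
            return (true);
    return (false);
}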
/*
 * Slow generic memory access ..
 * .. becomes the path for all the accesses we can't handle via the
 * load/store hash.
 */
void
ss_memory_asi_access(simcpu_t *sp, maccess_t memop, uint64_t *regp,
    mem_flags_t mflags, uint_t asi, uint_t context_type,
    uint_t align_mask, tvaddr_t va, tvaddr_t reg2)
{
    sparcv9_cpu_t *v9p;
    ss_strand_t *nsp;
    ss_proc_t *npp;
    ss_tlb_t *tlbp;
    config_addr_t *cap;
    maccess_t op;
    uint_t size, shift, partid, flags;
    tvaddr_t tag, perm_cache;
    uint64_t val, pa, pa_tag, pa_offset, idx, extent;
    uint8_t *bufp, *ptr;
    dev_access_t da;
    uint_t i;

    v9p = (sparcv9_cpu_t *)(sp->specificp);
    nsp = v9p->impl_specificp;
    npp = (ss_proc_t *)(sp->config_procp->procp);

    mflags ^= (asi & SS_ASI_LE_MASK) ? MF_Little_Endian : 0;

    /* OK, derive access address etc. */
    op = memop & MA_Op_Mask;
    size = memop & MA_Size_Mask;

    DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid,
        "SunSPARC::: LE load/store pc=0x%llx instr=0x%x count=%d asi=0x%x\n",
        sp->pc, op, (1 << size), asi); );

    /*
     * OK - Step 1 : to do or not do a TLB translation.
     * The assumption here is that privilege checks have already happened.
     */

    /* quick check of alignment */
    if ((va & (tvaddr_t)align_mask) != 0) {
        sparcv9_trap_type_t tt;

        if (v9p->pstate.addr_mask)
            va &= MASK64(31,0);    /* SV9_ID125 FIXME */

        DBGALIGN( lprintf(sp->gid,
            "Miss data access pc=0x%llx va=0x%llx align_mask=0x%llx\n",
            sp->pc, va, (tvaddr_t)align_mask); );

        /* alignment error - force a trap */
        SET_DTLB_FAULT(nsp, VA48(va));
        if ((MA_ldfp64 == memop || MA_stfp64 == memop) &&
            ((va & 0x3) == 0)) {
            tt = ((memop == MA_ldfp64) ?
                Sparcv9_trap_LDDF_mem_address_not_aligned :
                Sparcv9_trap_STDF_mem_address_not_aligned);
        } else {
            tt = Sparcv9_trap_mem_address_not_aligned;
        }
        v9p->post_precise_trap(sp, tt);
        return;
    }
    /* Find the pa corresponding to the line we need */
    tag = va & XDCACHE_TAG_MASK;

    /*
     * We have to get the PA from the EA ... this depends on the mode
     * and the type of access.
     */
    if (v9p->pstate.addr_mask) {
        va &= MASK64(31,0);
        /* NOTE: we dont mask tag ... we allow that to match the 64bit address */
    }

    flags = SS_TLB_FLAG_READ | SS_TLB_FLAG_WRITE;    /* default access flags */
    /* default read and write permission for MMU bypass */
    perm_cache = XDCACHE_READ_PERM | XDCACHE_WRITE_PERM;
    pa = va;
    pa_tag = tag;
    pa_offset = 0;
    partid = nsp->partid;
    tlbp = nsp->dtlbp;

    /*
     * OK perform the TLB access based on the context
     * and partition id selected
     */
    if (!(mflags & MF_MMU_Bypass)) {
        tlb_entry_t *tep, *tmp_tep;
        ss_trap_type_t miss_trap_type;
        uint_t context, miss_context;
        bool_t search_tlb_again;

        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: performing TLB access \n"); );

        search_tlb_again = false;
        miss_context = 0;
        if (!(nsp->dmmu.enabled) && !(mflags & MF_Has_Priv)) {
            /* If not priv mode and mmu is off, translate real addresses */
            context = SS_TLB_REAL_CONTEXT;
        } else {
            /* figure out the context value */
            switch (context_type) {
            case ss_ctx_primary:
                ASSERT((mflags & MF_TLB_Real_Ctx) == 0);
                miss_context = context = nsp->pri_context;
                if (nsp->pri_context != nsp->pri_context1)
                    search_tlb_again = true;
                break;
            case ss_ctx_secondary:
                ASSERT((mflags & MF_TLB_Real_Ctx) == 0);
                miss_context = context = nsp->sec_context;
                if (nsp->sec_context != nsp->sec_context1)
                    search_tlb_again = true;
                break;
            case ss_ctx_nucleus:
                if (mflags & MF_TLB_Real_Ctx)
                    context = SS_TLB_REAL_CONTEXT;
                else
                    context = SS_NUCLEUS_CONTEXT;
                break;
            default:
                fatal("ss_memory_asi_access: Internal Error. Not expecting "
                    "context type 0x%x\n", context_type);
            }
        }

        /*
         * check out of range addresses (i.e. those which lie within
         * the "VA hole")
         */
        if ((va >= SS_VA_HOLE_LB) && (va <= SS_VA_HOLE_UB)) {
            ss_trap_type_t tt;

            /*
             * setup the right trap type
             * (see N2 PRM, Table 13-15 and Table 13-16)
             */
            if (context == SS_TLB_REAL_CONTEXT)
                tt = N2_trap_mem_real_range;
            else
                tt = N2_trap_mem_address_range;
            SET_DTLB_FAULT(nsp, VA48(va));
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)tt);
            return;
        }
RW_rdlock(&tlbp
->rwlock
);
/* FIXME: Need a better hash than this ! */
idx
= va
>> SS_MAX_PAGE_SIZE_BITS
;
* So we search for a matching page using the info we have in the
* hash - while another thread might possibly be removing or
* inserting an entry into the same table.
for ( tep
= tlbp
->hash
[idx
].ptr
; tep
!=(tlb_entry_t
*)0; tep
= tep
->nextp
) {
/* try and match the entry as appropriate */
if (((tep
->tag_pfn
^ va
)>>tep
->match_shift
)==0 && tep
->match_context
==context
&& tep
->partid
== partid
) {
* Might need to search the TLB one more time based
* on the shared context value.
search_tlb_again
= false;
if (context_type
== ss_ctx_primary
)
context
= nsp
->pri_context1
;
context
= nsp
->sec_context1
;
RW_unlock(&tlbp
->rwlock
);
DBGMISS( lprintf(sp
->gid
, "dtlb miss: pc=%lx asi=%x va=%lx ctx=%x\n", sp
->pc
, asi
, va
, miss_context
); );
* If the MMU is "disabled" in privileged mode ... this is a real miss, not a
* virtual translation miss, so the fault context and trap type is different
if ((nsp
->dmmu
.enabled
) && (!(mflags
& MF_TLB_Real_Ctx
))) {
miss_trap_type
= ss_hardware_tablewalk(sp
, &(nsp
->dmmu
), tlbp
, va
, context_type
, &flags
, &pa_offset
);
if (miss_trap_type
== SS_trap_NONE
) {
miss_context
= 0; /* null for ra->pa miss undefined ? */
miss_trap_type
= SS_trap_data_real_translation_miss
;
nsp
->dmmu
.tag_access_reg
= (va
& ~MASK64(12,0)) | miss_context
; /* Do this properly later - FIXME */
SET_DTLB_FAULT( nsp
, va
);
DBGMMU( lprintf(sp
->gid
, "DMMU tag access = 0x%llx\n", nsp
->dmmu
.tag_access_reg
); );
v9p
->post_precise_trap(sp
, (sparcv9_trap_type_t
)miss_trap_type
);
        /*
         * try and match the entry again for multi-hit
         */
        for (tmp_tep = tep->nextp; tmp_tep != (tlb_entry_t *)0;
            tmp_tep = tmp_tep->nextp) {
            if (((tmp_tep->tag_pfn ^ va) >> tmp_tep->match_shift) == 0
                && tmp_tep->match_context == context
                && tmp_tep->partid == partid) {
                RW_unlock(&tlbp->rwlock);
                DBGMMU( lprintf(sp->gid,
                    "dtlb miss multi-hit: pc=%lx va=%lx ctx=%x\n",
                    sp->pc, va, context); );
                DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n",
                    tep->tag_context, tep->match_shift, tep->tag_pfn,
                    tep->tag_pfn + tep->pa_offset); );
                DBGMMU( lprintf(sp->gid, " 0x%x %d 0x%llx 0x%llx\n",
                    tmp_tep->tag_context, tmp_tep->match_shift,
                    tmp_tep->tag_pfn, tmp_tep->tag_pfn + tmp_tep->pa_offset); );
                v9p->post_precise_trap(sp,
                    (sparcv9_trap_type_t)SS_trap_data_access_MMU_error);
                return;
            }
        }

        /* we have a matching entry ... now all we have to worry about are the permissions */
        flags = tep->flags;
        pa_offset = tep->pa_offset;
        RW_unlock(&tlbp->rwlock);

        /*
         * Errors on dtlb hit: stash table_entry pointer and if
         * subsequent itlb hit on same entry post error again.
         */
        if (dtlb_hit_error_match(sp, op, tep, va))
            return;

tlb_translated:;
        pa += pa_offset;
        pa_tag += pa_offset;

        /* privilege test apparently takes priority ... p.51 US-I PRM table 6-4 */
        if ((flags & SS_TLB_FLAG_PRIV) && !(mflags & MF_Has_Priv)) {
            nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* Do this properly later - FIXME */
            miss_trap_type = SS_trap_DAE_privilege_violation;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
            return;
        }
        /*
         * validate bits NFO, E and CP
         */
        if (!(flags & SS_TLB_FLAG_CP) && (mflags & MF_Atomic_Access)) {
            nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context;
            miss_trap_type = SS_trap_DAE_nc_page;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
            return;
        }
        if ((flags & SS_TLB_FLAG_E) && (mflags & MF_No_Fault)) {
            nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context;
            miss_trap_type = SS_trap_DAE_so_page;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
            return;
        }
        if ((flags & SS_TLB_FLAG_NFO) && (!(mflags & MF_No_Fault))) {
            nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context;
            miss_trap_type = SS_trap_DAE_NFO_page;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
            return;
        }
        if (IS_V9_MA_STORE(op) && !(flags & SS_TLB_FLAG_WRITE)) {
            nsp->dmmu.tag_access_reg = (va & ~MASK64(12,0)) | miss_context; /* Do this properly later - FIXME */
            miss_trap_type = SS_trap_fast_data_access_protection;
            v9p->post_precise_trap(sp, (sparcv9_trap_type_t)miss_trap_type);
            return;
        }

        mflags ^= (flags & SS_TLB_FLAG_IE) ? MF_Little_Endian : 0;

        perm_cache = (flags & SS_TLB_FLAG_WRITE) ? XDCACHE_WRITE_PERM : 0;
        perm_cache |= (flags & SS_TLB_FLAG_READ) ? XDCACHE_READ_PERM : 0;
    } else {
        /*
         * Niagara 2 only implements 40 bits of PA; the tlb code
         * masks PA, so here we need to mask bypass PAs.
         */
        pa &= MASK64(39,0);
        pa_tag &= MASK64(39,0);
    }
    /*
     * OK - now go get the pointer to the line data
     * ... start by finding the device that has the memory we need.
     * optimise: by guessing at the last device found.
     */

    /* now find the device - looking in the cache first */
    cap = sp->xdc.miss_addrp;
    if (!(cap && (cap->baseaddr <= pa) && (pa < cap->topaddr))) {
        domain_t *domainp;
        config_proc_t *config_procp;

        config_procp = sp->config_procp;
        domainp = config_procp->domainp;

        cap = find_domain_address(domainp, pa);
        if (cap == (config_addr_t *)0) {
            /* OK it's a bus error there was no backing store */
            EXEC_WARNING(("bus error - (@pc=0x%llx, icount=%llu) "
                "access to va=0x%llx (pid=0x%x,ctx_type=0x%x,cacheline "
                "va=0x%llx -> physical 0x%llx)", sp->pc, ICOUNT(sp),
                va, nsp->partid, context_type, tag, pa_tag));
            v9p->post_precise_trap(sp, Sparcv9_trap_data_access_error);
            return;
        }
    }
    /* try and get the buffer pointer */
    DBGLE( if (MF_Little_Endian & mflags)
        lprintf(sp->gid, "SunSPARC::: calling dev_cacheable\n"); );

    da = IS_V9_MA_STORE(op) ? DA_Store : DA_Load;
    extent = cap->config_devp->dev_typep->dev_cacheable(cap, da,
        pa_tag - cap->baseaddr, &bufp);

    if (extent < XDCACHE_LINE_SIZE) {
        uint64_t tempreg, *aregp;
        uint_t pio_op;
        bool_t status;

        /* Let device handle memory access operation */
        /* bus error again ? or fill from multiple devices ? */
        /* need to check validity for device here ... FIXME */
        pio_op = memop & MA_Op_Mask;

        if ((MF_Little_Endian & mflags) && (pio_op == MA_St)) {
            tempreg = sparcv9_invert_endianess(regp, (1 << size));
            aregp = &tempreg;
        } else if ((&(sp->intreg[Reg_sparcv9_g0]) == regp) &&
            ((pio_op == MA_Ld) || (pio_op == MA_LdSigned))) {
            /* loads to %g0 must not clobber the register file */
            aregp = &tempreg;
        } else {
            aregp = regp;
        }

        status = cap->config_devp->dev_typep->dev_cpu_access(sp, cap,
            pa - cap->baseaddr, memop, aregp);

        if ((MF_Little_Endian & mflags) && status &&
            (pio_op == MA_Ld || pio_op == MA_LdSigned)) {
            *regp = sparcv9_invert_endianess(regp, (1 << size));
            if (pio_op == MA_LdSigned) {
                shift = 64 - (8 << size);
                *regp = ((sint64_t)(*regp << shift)) >> shift;
            }
        }

        ASSERT(0LL == sp->intreg[Reg_sparcv9_g0]);

        if (status)
            goto done;

        EXEC_WARNING(("data access error - (@pc=0x%llx) access to va=0x%llx "
            "(pid=0x%x,ctx_type=0x%x,physical 0x%llx)", sp->pc, va,
            nsp->partid, context_type, pa));
        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: post_precise_trap \n"); );
        v9p->post_precise_trap(sp, Sparcv9_trap_data_access_error); /* FIXME: right trap ? */
        return;
    }
    /*
     * processor-wide checks for unhandled L2 and DRAM errors
     */
    if (l2dram_access_error_match(sp, op, pa))
        return;

    DBGLE( if (MF_Little_Endian & mflags)
        lprintf(sp->gid, "SunSPARC::: handling cacheable device memory\n"); );

    /*
     * Now handle cacheable device memory.
     *
     * Because we implicitly assume that the xdc uses the current context
     * we only add missed entries to the xdc iff it was a normal memory op.
     */
    if ((mflags & (MF_Normal | MF_Little_Endian)) == MF_Normal) {
        uint64_t ridx;
        xdcache_line_t *xclp;

        sp->xdc.miss_addrp = cap;    /* cache for next time */

        ridx = (va >> XDCACHE_RAW_SHIFT) & XDCACHE_RAW_LINE_MASK;
        xclp = (xdcache_line_t *)(((uint8_t *)&(sp->xdc.line[0])) + ridx);
        /* only cache if memory is cacheable */
        /* WARNING: This tag may be a full 64bit value even if pstate.am=1 */
        /* do not use ea_offset with anything else other than tag */
        xclp->tag = tag | perm_cache | sp->tagstate;
        xclp->offset = ((uint64_t)bufp) - tag;
    }
    /*
     * Sigh now complete the load/store on behalf of the original
     * instruction.
     */

#if HOST_CPU_LITTLE_ENDIAN
    /* a big-endian guest access on a little-endian host needs one more flip */
    mflags ^= MF_Little_Endian;
#endif

    DBGLE( if (MF_Little_Endian & mflags) lprintf(sp->gid,
        "SunSPARC::: completing load/store on behalf of original instr.\n"); );

    ptr = (uint8_t *)(bufp + (pa & XDCACHE_LINE_OFFSET_MASK));

    switch (op) {
    case MA_Ld:
        switch (size) {
        case MA_Size8:  val = *(uint8_t *)ptr;  break;
        case MA_Size16: val = *(uint16_t *)ptr; break;
        case MA_Size32: val = *(uint32_t *)ptr; break;
        case MA_Size64: val = *(uint64_t *)ptr; break;
        }
        if (MF_Little_Endian & mflags) {
            DBGLE(lprintf(sp->gid,
                "SunSPARC::: MA_Ld with LE - val=0x%llx count=0x%x\n",
                val, (1 << size)); );
            val = sparcv9_invert_endianess(&val, (1 << size));
        }
        goto complete_load;

    case MA_LdSigned:
        switch (size) {
        case MA_Size8:  val = *(uint8_t *)ptr;  break;
        case MA_Size16: val = *(uint16_t *)ptr; break;
        case MA_Size32: val = *(uint32_t *)ptr; break;
        }
        if (MF_Little_Endian & mflags) {
            DBGLE(lprintf(sp->gid,
                "SunSPARC::: MA_LdSigned with LE - val=0x%llx count=0x%x\n",
                val, (1 << size)); );
            val = sparcv9_invert_endianess(&val, (1 << size));
        }
        shift = 64 - (8 << size);
        val = ((sint64_t)(val << shift)) >> shift;
        goto complete_load;

    case MA_St:
        if (MF_Little_Endian & mflags) {
            DBGLE(lprintf(sp->gid,
                "SunSPARC::: MA_St with LE - val=0x%llx\n", *regp); );
            val = sparcv9_invert_endianess(regp, (1 << size));
        } else {
            val = *regp;
        }
        if (mflags & MF_Blk_Init) {
            /* If line in L2 cache, leave data alone, otherwise zero it */
            /* XXX How to simulate? */
        }
        switch (size) {
        case MA_Size8:  *(uint8_t *)ptr  = val; break;
        case MA_Size16: *(uint16_t *)ptr = val; break;
        case MA_Size32: *(uint32_t *)ptr = val; break;
        case MA_Size64: *(uint64_t *)ptr = val; break;
        }
        break;
    case MA_LdFloat:
        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: MA_LdFloat with LE - \n"); );
        ASSERT(&(sp->intreg[Reg_sparcv9_g0]) != regp);
        switch (size) {
        case MA_Size8:
            *regp = *(uint8_t *)ptr;
            break;
        case MA_Size16:
            val = *(uint16_t *)ptr;
            if (MF_Little_Endian & mflags)
                *regp = sparcv9_invert_endianess(&val, sizeof (uint16_t));
            else
                *regp = val;
            break;
        case MA_Size32:
            if (MF_Little_Endian & mflags) {
                val = *(ieee_fp32_t *)ptr;
                val = sparcv9_invert_endianess(&val, sizeof (ieee_fp32_t));
                *(ieee_fp32_t *)regp = val;
            } else
                *(ieee_fp32_t *)regp = *(ieee_fp32_t *)ptr;
            break;
        case MA_Size64:
            if (MF_Little_Endian & mflags) {
                val = *(ieee_fp64_t *)ptr;
                *(ieee_fp64_t *)regp = sparcv9_invert_endianess(&val,
                    sizeof (ieee_fp64_t));
            } else
                *(ieee_fp64_t *)regp = *(ieee_fp64_t *)ptr;
            break;
        case MA_Size512:    /* 64 byte block load */
            if ((MF_Little_Endian & mflags) == 0) {
                for (i = 0; i < 8; i++) {
                    *(ieee_fp64_t *)(regp + i) =
                        *(ieee_fp64_t *)(ptr + i*8);
                }
            } else {
                for (i = 0; i < 8; i++) {
                    val = *(ieee_fp64_t *)(ptr + i*8);
                    *(ieee_fp64_t *)(regp + i) =
                        sparcv9_invert_endianess(&val,
                        sizeof (ieee_fp64_t));
                }
            }
            break;
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
        case MA_Size128:
            ASSERT((MF_Little_Endian & mflags) == 0);
            *(ieee_fp128_t *)regp = *(ieee_fp128_t *)ptr;
            break;
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */
        }
        break;
    case MA_StFloat:
        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: MA_StFloat with LE - \n"); );
        switch (size) {
        case MA_Size8:
            *(uint8_t *)ptr = (*regp) & MASK64(7,0);
            break;
        case MA_Size16:
            val = (*regp) & MASK64(15,0);
            if (MF_Little_Endian & mflags)
                val = sparcv9_invert_endianess(&val, sizeof (uint16_t));
            *(uint16_t *)ptr = val;
            break;
        case MA_Size32:
            if (MF_Little_Endian & mflags) {
                val = *(ieee_fp32_t *)regp;
                val = sparcv9_invert_endianess(&val, sizeof (ieee_fp32_t));
                *(ieee_fp32_t *)ptr = val;
            } else
                *(ieee_fp32_t *)ptr = *(ieee_fp32_t *)regp;
            break;
        case MA_Size64:
            /*
             * The VIS partial store ASIs (byte, halfword and word
             * variants) arrive here as 8 byte FP stores; each element
             * is merged into memory only when its bit is set in the
             * mask supplied in reg2.
             */
            switch (asi) {
            case 0xc0: case 0xc1:    /* ASI_PST8_P/S */
            case 0xc8: case 0xc9:    /* ASI_PST8_PL/SL */
                for (i = 0; i < 8; i++) {
                    if (!((reg2 >> i) & 1))
                        continue;
                    if (MF_Little_Endian & mflags) {
                        val = *(ieee_fp64_t *)(ptr);
                        val &= ~MASK64(63-(i*8),(56-(i*8)));
                        val |= (sparcv9_invert_endianess(regp,
                            sizeof (ieee_fp64_t)) & MASK64(63-(i*8),(56-(i*8))));
                        *(ieee_fp64_t *)(ptr) = (val);
                    } else {
                        val = (*(ieee_fp64_t *)ptr);
                        val &= ~MASK64((i*8)+7,i*8);
                        val |= (*(ieee_fp64_t *)regp & MASK64((i*8)+7,i*8));
                        *(ieee_fp64_t *)(ptr) = (val);
                    }
                }
                break;
            case 0xc2: case 0xc3:    /* ASI_PST16_P/S */
            case 0xca: case 0xcb:    /* ASI_PST16_PL/SL */
                for (i = 0; i < 4; i++) {
                    if (!((reg2 >> i) & 1))
                        continue;
                    if (MF_Little_Endian & mflags) {
                        val = *(ieee_fp64_t *)(ptr);
                        val &= ~MASK64(63-(i*16),48-(i*16));
                        val |= (sparcv9_invert_endianess(regp,
                            sizeof (ieee_fp64_t)) & MASK64(63-(i*16),(48-(i*16))));
                        *(ieee_fp64_t *)(ptr) = (val);
                    } else {
                        val = (*(ieee_fp64_t *)ptr);
                        val &= ~MASK64((i*16)+15,i*16);
                        val |= (*(ieee_fp64_t *)regp & MASK64((i*16)+15,i*16));
                        *(ieee_fp64_t *)(ptr) = (val);
                    }
                }
                break;
            case 0xc4: case 0xc5:    /* ASI_PST32_P/S */
            case 0xcc: case 0xcd:    /* ASI_PST32_PL/SL */
                for (i = 0; i < 2; i++) {
                    if (!((reg2 >> i) & 1))
                        continue;
                    if (MF_Little_Endian & mflags) {
                        val = *(ieee_fp64_t *)(ptr);
                        val &= ~MASK64(63-(i*32),32-(i*32));
                        val |= (sparcv9_invert_endianess(regp,
                            sizeof (ieee_fp64_t)) & MASK64(63-(i*32),(32-(i*32))));
                        *(ieee_fp64_t *)(ptr) = (val);
                    } else {
                        val = (*(ieee_fp64_t *)ptr);
                        val &= ~MASK64((i*32)+31,i*32);
                        val |= (*(ieee_fp64_t *)regp & MASK64((i*32)+31,i*32));
                        *(ieee_fp64_t *)(ptr) = (val);
                    }
                }
                break;
            default:    /* plain 8 byte FP store */
                if (MF_Little_Endian & mflags) {
                    val = sparcv9_invert_endianess(regp,
                        sizeof (ieee_fp64_t));
                    *(ieee_fp64_t *)ptr = val;
                } else
                    *(ieee_fp64_t *)ptr = *(ieee_fp64_t *)regp;
                break;
            }
            break;
        case MA_Size512:    /* 64 byte block store */
            if ((MF_Little_Endian & mflags) == 0) {
                for (i = 0; i < 8; i++) {
                    *(ieee_fp64_t *)(ptr + i*8) =
                        *(ieee_fp64_t *)(regp + i);
                }
            } else {
                for (i = 0; i < 8; i++) {
                    *(ieee_fp64_t *)(ptr + i*8) =
                        sparcv9_invert_endianess(regp + i,
                        sizeof (ieee_fp64_t));
                }
            }
            break;
#ifdef PROCESSOR_SUPPORTS_QUADFP /* { */
        case MA_Size128:
            ASSERT((MF_Little_Endian & mflags) == 0);
            *(ieee_fp128_t *)ptr = *(ieee_fp128_t *)regp;
            break;
#endif /* PROCESSOR_SUPPORTS_QUADFP */ /* } */
        }
        break;
    case MA_LdSt:    /* ldstub */
        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: MA_LdSt with LE - \n"); );
        val = host_ldstub(ptr, reg2, *regp);
        goto complete_load;

    case MA_Swap:
        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: MA_Swap with LE - \n"); );
        if (MF_Little_Endian & mflags) {
            val = sparcv9_invert_endianess(regp, (1 << size));
        } else {
            val = *regp;
        }
        val = host_swap((uint32_t *)ptr, val);
        if (MF_Little_Endian & mflags) {
            val = sparcv9_invert_endianess(&val, (1 << size));
        }
        goto complete_load;

    case MA_CAS:
        {
        uint64_t cval;

        DBGLE( if (MF_Little_Endian & mflags)
            lprintf(sp->gid, "SunSPARC::: MA_CAS with LE - \n"); );
        if (MF_Little_Endian & mflags) {
            val = sparcv9_invert_endianess(regp, (1 << size));
            cval = sparcv9_invert_endianess(&reg2, (1 << size));
        } else {
            val = *regp;
            cval = reg2;
        }
        if (MA_Size32 == size)
            val = host_cas32((uint32_t *)ptr, cval, val);
        else
            val = host_cas64((uint64_t *)ptr, cval, val);
        if (MF_Little_Endian & mflags) {
            val = sparcv9_invert_endianess(&val, (1 << size));
        }
        goto complete_load;
        }

complete_load:
        if (&(sp->intreg[Reg_sparcv9_g0]) != regp)
            *regp = val;
        break;
    case MA_LdDouble:
        switch (size) {
        case MA_Size64:    /* standard sparc LDD instruction */
            val = *(uint64_t *)ptr;
            regp[0] = (uint32_t)(val >> 32);
            regp[1] = (uint32_t)val;
            if (MF_Little_Endian & mflags) {
                DBGLE(lprintf(sp->gid,
                    "SunSPARC::: MA_ldDouble with LE - val=0x%llx count=0x%x\n",
                    val, (1 << size)); );
                regp[0] = sparcv9_invert_endianess(&regp[0], (1 << size)>>1);
                regp[1] = sparcv9_invert_endianess(&regp[1], (1 << size)>>1);
            }
            sp->intreg[Reg_sparcv9_g0] = 0;    /* regp might be %g0 */
            break;
        case MA_Size128:    /* atomic quad load */
            host_atomic_get128be((uint64_t *)ptr, regp, &regp[1]);
            if (MF_Little_Endian & mflags) {
                DBGLE(lprintf(sp->gid,
                    "SunSPARC::: MA_ldDouble with LE - val=0x%llx,0x%llx count=0x%x\n",
                    regp[0], regp[1], (1 << size)); );
                regp[0] = sparcv9_invert_endianess(&regp[0], (1 << size)>>1);
                regp[1] = sparcv9_invert_endianess(&regp[1], (1 << size)>>1);
            }
            sp->intreg[Reg_sparcv9_g0] = 0;    /* regp might be %g0 */
            break;
        default:
            fatal("ss_memory_asi_access: internal error - "
                "illegal size for MA_LdDouble");
        }
        break;

    case MA_StDouble:
        {
        uint32_t reven, rodd;

        ASSERT(size == MA_Size64);
        if (MF_Little_Endian & mflags) {
            DBGLE(lprintf(sp->gid,
                "SunSPARC::: MA_StDouble with LE - reven=0x%x rodd=0x%x count=0x%x\n",
                (uint32_t)regp[0], (uint32_t)regp[1], (1 << size)); );
            reven = (uint32_t)sparcv9_invert_endianess(&regp[0], (1 << size)>>1);
            rodd = (uint32_t)sparcv9_invert_endianess(&regp[1], (1 << size)>>1);
        } else {
            reven = (uint32_t)regp[0];
            rodd = (uint32_t)regp[1];
        }
        val = ((uint64_t)reven << 32) | ((uint32_t)rodd);
        *(uint64_t *)ptr = val;
        break;
        }
    case MA_LdFSR:
        ASSERT(MA_Size32 == size);
        val = *(uint32_t *)ptr;
        if (MF_Little_Endian & mflags)
            val = sparcv9_invert_endianess(&val, (1 << size));
        v9_set_fsr_lower(sp, val);
        break;

    case MA_LdXFSR:
        ASSERT(MA_Size64 == size);
        val = *(uint64_t *)ptr;
        if (MF_Little_Endian & mflags)
            val = sparcv9_invert_endianess(&val, (1 << size));
        v9_set_fsr(sp, val);
        break;

    case MA_StFSR:
        ASSERT(MA_Size32 == size);
        val = v9_get_fsr(sp);
        if (MF_Little_Endian & mflags)
            val = sparcv9_invert_endianess(&val, (1 << size));
        *(uint32_t *)ptr = val & MASK64(31,0);
        /* FTT is cleared on read of FSR */
        sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
        DBGFSR( lprintf(sp->gid, "stfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n",
            sp->pc, v9_get_fsr(sp), val); );
        break;

    case MA_StXFSR:
        ASSERT(MA_Size64 == size);
        val = v9_get_fsr(sp);
        if (MF_Little_Endian & mflags)
            val = sparcv9_invert_endianess(&val, (1 << size));
        *(uint64_t *)ptr = val;
        /* FTT is cleared on read of FSR */
        sp->v9_fsr_ctrl &= ~V9_FSR_FTT_MASK;
        DBGFSR( lprintf(sp->gid, "stxfsr: pc=0x%llx, fsr=0x%llx (was 0x%llx)\n",
            sp->pc, v9_get_fsr(sp), val); );
        break;

    default:
        fatal("ss_memory_asi_access: internal error - "
            "unimplemented memop 0x%x", memop);
    }

done:;
    /*
     * Finally go get the next instruction
     */
    DBGLE( if (MF_Little_Endian & mflags)
        lprintf(sp->gid, "SunSPARC::: getting the next instr.\n"); );
    NEXT_INSTN(sp);
}
/*
 * This function is called through the ASI store to the interrupt vector
 * dispatch register ASI_INTR_W (0x73); the store value is passed in by 'val'.
 *
 * sp is the originator and tnpp is the target processor. On a
 * single node system, sp will belong to tnpp. But on multinode systems,
 * if the cross call is going across nodes, sp will be on a different chip
 * than the target strand.
 */
void niagara2_send_xirq(simcpu_t *sp, ss_proc_t *tnpp, uint64_t val)
{
    uint_t strand, vcore_id, intr_id;
    uint_t vec_bit;
    uint_t source_vcore_id;
    ss_strand_t *tstrandp, *nsp;
    sparcv9_cpu_t *v9p, *tv9p;
    bool_t pay_attention;

    vcore_id = (val & MASK64(13, 8)) >> 8;
    vec_bit = val & MASK64(5, 0);

    /* normalize strand to internal strand */
    strand = STRANDID2IDX(tnpp, vcore_id);
    if (!VALIDIDX(tnpp, strand)) {
        fatal("[0x%llx] (pc=0x%llx)\tWrite to ASI_INTR_W with "
            "illegal virtual core value 0x%llx. ",
            sp->gid, sp->pc, vcore_id);
    }

    tstrandp = &(tnpp->ss_strandp[strand]);

    /*
     * check if the destination interrupt ID matches the ID in the interrupt
     * Id register. (Niagara 2 uses ASI_CMP_CORE_INTR_ID as the interrupt Id
     * register. The Id is defined by bits 5:0, and it should match the
     * virtual core Id, i.e., ASI_CMP_CORE_ID bits 5:0.)
     */
    intr_id = tstrandp->vcore_id;
    if (intr_id != vcore_id) {
        fatal("[0x%llx] (pc=0x%llx)\tDetected a mismatched interrupt Id: "
            "dst_intr_id = 0x%llx cmp_core_intr_id = 0x%llx",
            sp->gid, sp->pc, vcore_id, intr_id);
    }

    pthread_mutex_lock(&tstrandp->irq_lock);
    pay_attention = (0LL == tstrandp->irq_vector);
    tstrandp->irq_vector |= (1LL << vec_bit);
    pthread_mutex_unlock(&tstrandp->irq_lock);

    DBGE(lprintf(sp->gid, "niagara2_send_xirq: target strand=%p irq_vector=0x%llx\n",
        tstrandp, tstrandp->irq_vector); );

    v9p = (sparcv9_cpu_t *)(sp->specificp);
    nsp = v9p->impl_specificp;
    source_vcore_id = nsp->vcore_id;
    DBGMULNODE(lprintf(sp->gid, "niagara2_send_xirq from vcore_id %d on node %d"
        " to vcore_id %d on node %d irq_vector=%llx\n",
        source_vcore_id, sp->config_procp->proc_id, vcore_id,
        tnpp->config_procp->proc_id, tstrandp->irq_vector); );

    /*
     * The complicated part here is that the execution thread
     * determines when the interrupt is actually delivered, if at
     * all; all we need to do here is to ensure that that thread
     * pays attention to the fact that the interrupt vector status has
     * changed .. we only care if it goes non-zero ...
     */
    if (pay_attention) {
        tv9p = tnpp->strand[strand];
        tv9p->simp->exception_pending = true;
    }
}
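
/*
 * A standalone illustration of the ASI_INTR_W payload layout handled by
 * niagara2_send_xirq above: bits 13:8 select the target virtual core and
 * bits 5:0 select which of the 64 vector bits to post; the receive side
 * (ASI_INTR_R) drains the highest numbered pending bit. The static state
 * and names here are local to the example (the real code keeps one
 * vector per strand, under a lock).
 */
static uint64_t example_irq_vector;

static void
example_post_vector(uint64_t val, uint_t *vcore_idp)
{
    *vcore_idp = (uint_t)((val & MASK64(13, 8)) >> 8);    /* target core */
    example_irq_vector |= (1ULL << (val & MASK64(5, 0)));    /* vector bit */
}

static uint_t
example_drain_highest_vector(void)
{
    uint_t bit;

    if (example_irq_vector == 0)
        return (0);
    for (bit = 63; bit > 0; bit--)
        if (example_irq_vector & (1ULL << bit))
            break;
    example_irq_vector &= ~(1ULL << bit);
    return (bit);
}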
static uint64_t niagara2_ext_signal(config_proc_t *config_procp, ext_sig_t sigtype, void *vp)
{
    ss_proc_t *npp;
    ss_strand_t *nsp;
    sparcv9_cpu_t *v9p;
    simcpu_t *sp;
    ncu_t *ncup;
    pcie_mondo_t mondo;
    uint64_t int_man;
    uint_t thread_id, device_id, strand, vec_bit;
    bool_t pay_attention;
    int i;

    npp = (ss_proc_t *)(config_procp->procp);
    ncup = npp->ncup;

    switch (sigtype) {
    case ES_NIU:
        device_id = *(uint_t *)vp;
        if ((device_id < NCU_DEV_NIU_LB) || (device_id > NCU_DEV_NIU_UB))
            fatal("niagara2_ext_signal: NIU device_id 0x%lx out of range",
                device_id);
        int_man = ncup->regs.int_man[device_id];
        thread_id = NCU_INT_MAN_CPUID(int_man);
        vec_bit = int_man & INTR_VEC_MASK;
        break;

    case ES_SSI:
        device_id = *(uint_t *)vp;
        int_man = ncup->regs.int_man[device_id];
        thread_id = NCU_INT_MAN_CPUID(int_man);
        vec_bit = int_man & INTR_VEC_MASK;
        break;

    case ES_PCIE:
        mondo = *(pcie_mondo_t *)vp;
        thread_id = mondo.thread_id;

        pthread_mutex_lock(&ncup->ncu_lock);
        if (ncup->regs.mondo_int_busy[thread_id] & NCU_MONDO_INT_BUSY) {
            pthread_mutex_unlock(&ncup->ncu_lock);
            return (1);    /* target busy - mondo not accepted */
        }
        ncup->regs.mondo_int_data0[thread_id] = mondo.data[0];
        ncup->regs.mondo_int_data1[thread_id] = mondo.data[1];
        ncup->regs.mondo_int_busy[thread_id] = NCU_MONDO_INT_BUSY;
        vec_bit = ncup->regs.mondo_int_vec & INTR_VEC_MASK;
        pthread_mutex_unlock(&ncup->ncu_lock);
        break;

    case ES_LEGION_SAVE_STATE:
        for (i = (npp->nstrands) - 1; i >= 0; i--) {
            v9p = npp->strand[i];
            sp = v9p->simp;
            nsp = (ss_strand_t *)(v9p->impl_specificp);
            nsp->pending_async_tt = SS_trap_legion_save_state;
            DBGE( lprintf(sp->gid, "ES_SAVE_STATE set_attention\n"); );
            sp->exception_pending = true;
        }
        return (0);

    default:
        EXEC_WARNING(("processor%d: ext_signal %d ignored",
            config_procp->proc_id, sigtype));
        return (0);
    }

    /*
     * send IRQ interrupt to the target strand
     */
    strand = STRANDID2IDX(npp, thread_id);
    if (!VALIDIDX(npp, strand)) {
        EXEC_WARNING(("niagara2_ext_signal called with illegal strand 0x%x",
            strand));
        return (0);
    }

    nsp = &(npp->ss_strandp[strand]);
    v9p = npp->strand[strand];
    sp = v9p->simp;

    pthread_mutex_lock(&nsp->irq_lock);
    pay_attention = (0LL == nsp->irq_vector);
    nsp->irq_vector |= 1LL << vec_bit;
    pthread_mutex_unlock(&nsp->irq_lock);

    DBGE(lprintf(sp->gid, "niagara2_ext_signal: target strand=%p irq_vector=%llx\n",
        nsp, nsp->irq_vector); );

    /*
     * The target strand's execution thread delivers the interrupt;
     * all we do here is make sure it pays attention when the vector
     * goes non-zero.
     */
    if (pay_attention)
        sp->exception_pending = true;

    return (0);
}
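
/*
 * Sketch of the mondo-interrupt busy handshake implemented above: a new
 * mondo is accepted only if the per-thread busy flag is clear, and the
 * data is latched and the flag set under the NCU lock. This is a
 * standalone illustration with local state, assuming pthreads as used
 * throughout this file.
 */
static pthread_mutex_t example_mondo_lock = PTHREAD_MUTEX_INITIALIZER;
static bool_t example_mondo_busy;
static uint64_t example_mondo_data[2];

static bool_t
example_mondo_deliver(const uint64_t data[2])
{
    pthread_mutex_lock(&example_mondo_lock);
    if (example_mondo_busy) {
        pthread_mutex_unlock(&example_mondo_lock);
        return (false);        /* target busy: sender must retry */
    }
    example_mondo_data[0] = data[0];    /* latch the payload */
    example_mondo_data[1] = data[1];
    example_mondo_busy = true;        /* cleared when SW acks the mondo */
    pthread_mutex_unlock(&example_mondo_lock);
    return (true);
}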
/*
 * CPU specific instruction decode routine. This routine is called from the main
 * instruction decoder routine only when that routine comes up empty handed (i.e.
 * before declaring it an illegal or unknown instruction.) For now, we have not
 * implemented any frequently used CPU specific instructions for Niagara 2,
 * and so the performance impact of making this function call is
 * negligible since it doesn't happen in the common case.
 *
 * This routine returns a pointer to the exec function which is to be run as a
 * result of encountering the instruction op code in question. Other than that,
 * it is designed to be very similar to the main instruction decode routine.
 */
static op_funcp
niagara2_decode_me(simcpu_t *sp, xicache_instn_t *xcip, uint32_t instn)
{
    uint_t rd, rs1;
    sint32_t simm;
    T2o3_code_t op2c;
    op_funcp exec_funcp;

    exec_funcp = (op_funcp)0;    /* selected exec function */
    rd = X_RD(instn);
    rs1 = X_RS1(instn);

    switch ((ty_code_t)X_OP(instn)) {
    case Ty_0:    /* Branches and Sethi */
        break;

    case Ty_1:    /* Call instructions */
        break;

    case Ty_2:    /* Arithmetic and Misc instructions */
        op2c = (T2o3_code_t)X_OP3(instn);

        switch (op2c) {
        case T2o3_movcc:
            /* register x immediate -> register forms */
            if (!X_FMT4_CC2(instn)) {
                /* move on floating point condition codes */
                if (rd == 0) goto n2_do_noop;

                /* We attempt to fast path movfcc_a ... */
                if (X_FMT4_COND(instn) == cond_n) goto n2_do_noop;
                simm = X_SIMM13(instn);
                simm = (simm << 21) >> 21;    /* truncate to 11 bits */
                if (X_FMT4_COND(instn) == cond_a) goto n2_do_move_simm;

                SET_OP_MOVCC_CC(X_FMT4_CC(instn));
                SET_OP_MOVCC_COND(X_FMT4_COND(instn));
                SET_OP_SIMM16(simm);
                SET_OPv9(movcc_imm);
                goto n2_done_retry_instn;
            }

            switch ((cc4bit_t)X_FMT4_CC(instn)) {
            case CC4bit_icc: SET_OP_MOVCC_CC(0); break;
            case CC4bit_xcc: SET_OP_MOVCC_CC(1); break;
            default:
                SET_OP_ILL_REASON(movcc_illegal_cc_field);
                goto n2_illegal_instruction;
            }

            if (rd == 0) goto n2_do_noop;

            /*
             * truncate simm - as only an 11 bit
             * immediate in movcc instructions, not the
             * 13 bit field we extracted above
             */
            simm = X_SIMM13(instn);
            simm = (simm << 21) >> 21;
            if (X_FMT4_COND(instn) == cond_n) goto n2_do_noop;
            if (X_FMT4_COND(instn) == cond_a) goto n2_do_move_simm;

            SET_OP_MOVCC_COND(X_FMT4_COND(instn));
            SET_OP_SIMM16(simm);
            SET_OPv9(movcc_imm);
            goto n2_done_retry_instn;

        case T2o3_rd_state_reg:
            if (rd == 0 && rs1 == 15) {
                /* the instruction is membar */
                if (!CHECK_RESERVED_ZERO(instn, 12, 7)) {
                    SET_OP_ILL_REASON(misc_reserved_field_non_zero);
                    goto n2_illegal_instruction;
                }
                simm = X_MEMBAR_MASKS(instn);
                SET_OP_SIMM16(simm);    /* masks in immediates */
                SET_OPv9(membar);
                goto n2_done_retry_instn;
            }
            SET_OPv9(read_state_reg);
            goto n2_done_retry_instn;

        case T2o3_save:
            if (X_I(instn)) {
                simm = X_SIMM13(instn);
                SET_OP_SIMM16(simm);
                SET_OPv9(save_imm);    /* rd == 0 determined in instn implementation */
            } else {
                SET_OPv9(save_rrr);    /* rd == 0 determined in instn implementation */
            }
            goto n2_done_retry_instn;

        case T2o3_saved_restored:
            {
            int fcn = X_FMT2_FCN(instn);

            if (!CHECK_RESERVED_ZERO(instn, 18, 0)) {
                SET_OP_ILL_REASON(saved_reserved_field_non_zero);
                goto n2_illegal_instruction;
            }
            if (fcn > 1) {
                SET_OP_ILL_REASON(saved_fcn_invalid);
                goto n2_illegal_instruction;
            }
            SET_OP_MISC_BITS((uint_t)fcn);
            SET_OPv9(saved_restored);
            goto n2_done_retry_instn;
            }

        case T2o3_done_retry:
            switch (X_FMT3_FCN(instn)) {
            case 0:    /* done */
                SET_OP_MISC_BITS((uint_t)true);
                break;
            case 1:    /* retry */
                SET_OP_MISC_BITS((uint_t)false);
                break;
            default:
                SET_OP_ILL_REASON(done_retry_illegal_fcn_field);
                goto n2_illegal_instruction;
            }
            SET_OPv9(done_retry);
            goto n2_done_retry_instn;

        case T2o3_fpop1:
        case T2o3_fpop2:
            FP_DECODE_FPU_ON_CHECK;
            /*
             * register x register -> register forms: each opf in
             * (T3o3_fp36_opf_t)X_FP_OPF(instn) dispatches to one of
             * the n2_do_fp_* templates according to the shapes of its
             * source and destination registers (s=single, d=double,
             * x=64 bit integer), e.g. double x double -> double is
             * n2_do_fp_s1d_s2d_dd. The VIS multiplies
             * VISop36_fmuld8sux16 and VISop36_fmuld8ulx16 take a pair
             * of single sources and produce a double
             * (n2_do_fp_s1s_s2s_dd). Anything not recognised is an
             * unimplemented visop.
             */
            goto n2_unimplemented_visop;

        default:
            break;
        }
        break;

    case Ty_3:    /* Principally load/store operations */
        break;

    default:
        break;
    }

    /* Nothing Niagara 2 specific matched either */
    goto n2_illegal_instruction;

n2_do_noop:
    SET_OPv9(noop);
    goto n2_done_retry_instn;

n2_do_move_simm:
    SET_OP_SIMM16(simm);
    SET_OPv9(move_simm);
    goto n2_done_retry_instn;

#ifdef FP_DECODE_DISABLED
n2_fp_disabled:
    SET_OPv9(fp_unimplemented_instruction);
    goto n2_done_retry_instn;
#endif /* FP_DECODE_DISABLED */

n2_unimplemented_visop:
    SET_OP_ILL_REASON(unimplemented_visop);
    goto n2_illegal_instruction;

n2_illegal_instruction:
    SET_OPv9(illegal_instruction);
    /* FALLTHRU */

n2_done_retry_instn:
    return (exec_funcp);
}
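
/*
 * A standalone illustration of the SPARC instruction-word fields
 * consulted by niagara2_decode_me above: op lives in bits 31:30, rd in
 * bits 29:25 and op3 in bits 24:19 (architectural field positions; the
 * real X_* accessors come from the decode headers). Names are local to
 * the example.
 */
static void
example_decode_fields(uint32_t instn, uint_t *opp, uint_t *op3p, uint_t *rdp)
{
    *opp = (instn >> 30) & 0x3;    /* Ty_0 .. Ty_3 */
    *rdp = (instn >> 25) & 0x1f;    /* destination register */
    *op3p = (instn >> 19) & 0x3f;    /* opcode within Ty_2/Ty_3 */
}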
void niagara2_get_pseudo_dev(config_proc_t *config_procp, char *dev_namep, void *devp)
{
    ss_proc_t *npp;

    npp = (ss_proc_t *)config_procp->procp;

    if (strcmp(dev_namep, PSEUDO_DEV_NAME_NCU) == 0) {
        *((void **)devp) = (void *)npp->ncup;
    } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_CCU) == 0) {
        *((void **)devp) = (void *)npp->clockp;
    } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_MCU) == 0) {
        *((void **)devp) = (void *)npp->mbankp;
    } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_L2C) == 0) {
        *((void **)devp) = (void *)npp->l2p;
    } else if (strcmp(dev_namep, PSEUDO_DEV_NAME_SSI) == 0) {
        *((void **)devp) = (void *)npp->ssip;
    }
}
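
/*
 * The strcmp chain above could equally be table driven; a minimal
 * sketch of that alternative follows. It assumes offsetof (from
 * <stddef.h>) is available, and the table type and names are local to
 * the example - a caller would build entries like
 * { "ncu", offsetof(ss_proc_t, ncup) }.
 */
typedef struct {
    const char *namep;    /* pseudo device name */
    size_t offset;        /* offset of the device pointer in the proc struct */
} example_pseudo_dev_t;

static void *
example_pseudo_dev_lookup(const example_pseudo_dev_t *tbl, uint_t n,
    void *basep, const char *namep)
{
    uint_t i;

    for (i = 0; i < n; i++)
        if (strcmp(namep, tbl[i].namep) == 0)
            return (*(void **)((char *)basep + tbl[i].offset));
    return ((void *)0);
}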
/*
 * Perform any processor specific parsing for "proc" elements in the
 * Legion config file. Returns true if the token was handled by this
 * function, false otherwise.
 */
bool_t
ss_parse_proc_entry(ss_proc_t *procp, domain_t *domainp)
{
    uint_t node_id;

    if (streq(lex.strp, "node_id")) {
        node_id = parse_number_assign();
        procp->config_procp->proc_id = node_id;
        if (node_id > MAX_NODEID)
            fatal("Invalid node_id %d in VF config file\n", node_id);
        /* Handled a Vfalls specific element */
        return (true);
    }

    /* Didn't match any Niagara2 specific element */
    return (false);
}
/* Perform any post parsing checks that need to be made to the domain */
void niagara2_domain_check(domain_t *domainp)
{
    /*
     * Currently, VF nodes in conf files should be sequential, starting
     * from node 0 and not duplicated. See Section 11.9.1.16e of VF PRM Rev 0.1.
     * If no node_id field is specified in the conf file, then by default,
     * sequential node_id's will be assigned. Otherwise, they can be defined
     * in the conf file proc section by using the node_id field.
     */
    bool_t node[MAX_NODEID + 1] = {false, false, false, false};
    bool_t node_check = true;
    uint_t node_id;
    ss_proc_t *npp;
    int i;

    for (i = 0; i < domainp->procs.count; i++) {
        node_id = LIST_ENTRY(domainp->procs, i)->proc_id;
        if (node_id > MAX_NODEID || node[node_id])
            node_check = false;    /* out of range or duplicated */
        else
            node[node_id] = true;

        npp = (ss_proc_t *)LIST_ENTRY(domainp->procs, i)->procp;
        if ((domainp->procs.count > 1)) {
            /* multinode system - enable global addressing */
            npp->global_addressing_ok.flags.rsvd = GLOBAL_ADDRESSING_FLAG_EN;
            npp->global_addressing_ok.flags.multi_chip = GLOBAL_ADDRESSING_FLAG_EN;
            npp->global_addressing_ok.flags.lfu = GLOBAL_ADDRESSING_FLAG_DIS;
            npp->global_addressing_ok.flags.zambezi = GLOBAL_ADDRESSING_FLAG_EN;
        } else {
            npp->global_addressing_ok.all = 0x0;
        }
    }

    /* node ids must cover 0 .. count-1 */
    for (i = 0; i < domainp->procs.count; i++)
        if (i <= MAX_NODEID && !node[i])
            node_check = false;

    if (!node_check)
        EXEC_WARNING(("Please make sure that processor node ids"
            " in .conf file\n are sequential and unique"
            " with node 0 being the lowest node\n"));
}