* ========== Copyright Header Begin ==========================================
* Hypervisor Software File: mmu.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* - Do not alter or remove copyright notices
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems, Inc. or the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
.ident "@(#)mmu.s 1.6 07/07/11 SMI"
#include <sys/asm_linkage.h>
!
! Fragment: real-address DTLB miss handling (guest access with a
! real-address context).  %g2 holds the 8K-aligned real address from
! the D-MMU tag access register; a TTE is built for it and inserted
! directly into the DTLB with the Real bit set.
! NOTE(review): intervening lines (labels, delay slots, macro
! continuations) are not visible in this excerpt.
!
! %g2 8k-aligned real addr from tag access
! XXX if hypervisor access then panic instead of watchdog_guest
VCPU2GUEST_STRUCT(%g1, %g7)
! Convert guest RA (%g2) to a PA; branches to 1f if the RA is not
! within the guest's memory ranges
RA2PA_RANGE_CONV(%g7, %g2, %g5, 1f, %g4, %g3)
! tte valid, cp, writable, priv
or %g2, TTE_CP | TTE_P | TTE_W, %g2
mov TLB_IN_REAL, %g2 ! Real bit
stxa %g3, [%g2]ASI_DTLB_DATA_IN
! RA was not ordinary memory: check the guest's I/O ranges instead.
! NOTE(review): the macro argument list continues on lines outside
! this view.
RANGE_CHECK_IO(%g7, %g2, %g5, .rdmmu_miss_found, .rdmmu_miss_not_found,
! tte valid, e, writable, priv (side-effect bit for I/O space)
or %g2, TTE_E | TTE_P | TTE_W, %g2
mov TLB_IN_REAL, %g2 ! Real bit
stxa %g3, [%g2]ASI_DTLB_DATA_IN
! RA was in neither memory nor I/O ranges: report an invalid RA
! fault to the guest via the common DMMU error path
ALTENTRY(rdmmu_miss_not_found2)
mov MMU_FT_INVALIDRA, %g1
ba,pt %xcc, dmmu_err_common ! (%g1=ft, %g2=addr, %g3=ctx)
!
! Fragment: real-address ITLB miss handling.  Reads the I-MMU tag
! access register, converts the guest RA to a PA, and inserts a TTE
! into the ITLB with the Real bit set.  If the conversion fails the
! invalid-RA error path is taken.
! NOTE(review): labels and delay slots between the visible lines are
! not shown in this excerpt.
!
! %g2 8k-aligned real addr from tag access
ldxa [%g2]ASI_IMMU, %g2 /* tag access */
VCPU2GUEST_STRUCT(%g1, %g3)
! Convert guest RA (%g2) to a PA; branches to 1f on out-of-range RA
RA2PA_RANGE_CONV(%g3, %g2, %g0, 1f, %g4, %g1)
! tte valid, cp, writable, priv
or %g2, TTE_CP | TTE_P | TTE_W, %g2
mov TLB_IN_REAL, %g2 ! Real bit
stxa %g1, [%g2]ASI_ITLB_DATA_IN
! RA not within the guest's memory ranges: report an invalid RA
! fault via the common IMMU error path
mov MMU_FT_INVALIDRA, %g1
ba,pt %xcc, immu_err_common ! (%g1=ft, %g2=addr, %g3=ctx)
* Normal tlb miss handlers
* NB: If it's possible to context switch a guest then
* the tag access register (tag target too?) needs to
* be saved/restored as part of the guest MMU state
* (NOTE(review): sentence truncated in this excerpt).
!
! ITLB miss: search the guest's permanent (locked) mapping table
! under the per-guest spinlock before falling back to the normal
! TSB-driven miss path (immu_miss_common).
!
/* %g1 contains per CPU area */
/* %g3 contains immu tag target */
VCPU2GUEST_STRUCT(%g1, %g6)
! take the guest's permanent-mappings spinlock
add %g6, GUEST_PERM_MAPPINGS_LOCK, %g2
SPINLOCK_ENTER(%g2, %g3, %g4)
* Look for a possible miss on a permanent entry.
* Note that the permanent mapping can have one of
* valid - TTE.V != 0. This is a valid mapping, check for
* a match. If not a match, continue the search
* with the next permanent mapping from the array.
* If it is a match, we have a hit, update the TLB
* invalid - TTE != 0 && TTE.V == 0. This is a TTE which has
* been used for a permanent mapping but has been
* subsequently unmapped, setting the TTE.V bit to 0.
* This is not a match, continue the search
* with the next permanent mapping from the array.
* invalid - TTE == 0 && TTE.V == 0. This is a TTE which is
* still uninitialised and has never been used for a
* permanent mapping. This means that the other
* entries in the permanent mapping array are also
* unused (as we always use the first available
* permanent mapping array element for a mapping) so
* we can stop searching for a permanent mapping now,
! %g2 = base of the permanent mapping array,
! %g3 = offset of the last entry (search runs downwards)
add %g6, GUEST_PERM_MAPPINGS, %g2
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
* for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
* if (!table[i]->tte.v) {
* shift = TTE_PAGE_SHIFT(table[i]->tte);
* if ((table[i]->va >> shift) == (va >> shift)) {
!! %g2 = permanent mapping table base address
!! %g3 = current offset into table
* uninitialised, no more mappings, miss;
* initialised but invalid, get next, continue;
! classify the entry by its raw TTE value (V is bit 63, so a
! negative TTE means valid)
ldx [%g5 + MAPPING_TTE], %g6
brlz,pt %g6, 1f ! TTE.V == 1
brz,pt %g6, .ipmap_miss ! TTE == 0
ba,pt %xcc, .ipmap_continue ! TTE != 0 && TTE.V == 0
deccc GUEST_PERM_MAPPINGS_INCR, %g3
* (valid TTE, check for hit)
* shift = TTE_PAGE_SHIFT(m->tte);
* if ((m->va >> shift) == (va >> shift)) {
! compute the page shift for this TTE's page size
TTE_SHIFT_NOCHECK(%g6, %g7, %g4)
ldx [%g5 + MAPPING_VA], %g6
ldx [%g5 + MAPPING_TTE], %g5
! step to the previous entry; loop while offset >= 0
deccc GUEST_PERM_MAPPINGS_INCR, %g3
bgeu,pt %xcc, .ipmap_loop
ba,a,pt %xcc, .ipmap_miss
! hit: insert the matching permanent TTE into the ITLB
!! %g5 = tte (with pa) of matching entry
stxa %g5, [%g0]ASI_ITLB_DATA_IN
! NOTE(review): lock-address computation for the spinlock release;
! the SPINLOCK_EXIT sequence is not visible in this excerpt
inc GUEST_PERM_MAPPINGS_LOCK, %g6
VCPU_GUEST_STRUCT(%g1, %g6)
inc GUEST_PERM_MAPPINGS_LOCK, %g6
! miss in the permanent-mapping table: re-fetch the tag target in
! the delay slot and take the normal TSB miss path
ba,pt %xcc, immu_miss_common
ldxa [%g0]ASI_IMMU, %g3 /* tag target */
!
! Common ITLB miss path: decide between the fast miss trap
! (guest has TSBs configured for this context class) and the slow
! miss trap, record the fault address/context in the guest's
! MMU fault area, and revector the trap to the guest.
! NOTE(review): several instructions (delay slots, compares feeding
! the conditional branches) are not visible in this excerpt.
!
/* %g1 contains per CPU area */
/* %g3 contains immu tag target */
ENTRY_NP(immu_miss_common)
ALTENTRY(immu_miss_ctxnon0)
bgu,pn %xcc, watchdog_guest /* enforce %gl <= MAXPGL */
ldx [%g1 + CPU_MMU_AREA], %g2
brz,pn %g2, watchdog_guest /* enforce CPU_MMU_AREA != 0 */
srlx %g3, TAGTRG_CTX_RSHIFT, %g4 /* ctx from tag target */
/* if ctx == 0 and ctx0 set TSBs used, take slow trap */
/* if ctx != 0 and ctxnon0 set TSBs used, take slow trap */
movrz %g4, CPU_NTSBS_CTX0, %g7
/* update MMU_FAULT_AREA_INSTR */
#ifdef TSBMISS_ALIGN_ADDR
ldxa [%g3]ASI_IMMU, %g3 /* tag access */
stx %g4, [%g2 + MMU_FAULT_AREA_IADDR]
stx %g3, [%g2 + MMU_FAULT_AREA_ICTX]
#else /* !TSBMISS_ALIGN_ADDR */
ldxa [%g3]ASI_IMMU, %g3 /* tag target */
stx %g3, [%g2 + MMU_FAULT_AREA_ICTX]
stx %g4, [%g2 + MMU_FAULT_AREA_IADDR]
#endif /* !TSBMISS_ALIGN_ADDR */
/* fast misses do not update MMU_FAULT_AREA_IFT with MMU_FT_FASTMISS */
wrpr %g0, TT_FAST_IMMU_MISS, %tt
add %g3, (TT_FAST_IMMU_MISS << TT_OFFSET_SHIFT), %g3
cmp %g2, 1 /* trap happened at TL=0 */
TRAP_GUEST(%g3, %g1, %g2)
! slow miss variant: also records the fault type (IFT) below
/* update MMU_FAULT_AREA_INSTR */
#ifdef TSBMISS_ALIGN_ADDR
ldxa [%g3]ASI_IMMU, %g3 /* tag access */
stx %g4, [%g2 + MMU_FAULT_AREA_IADDR]
stx %g3, [%g2 + MMU_FAULT_AREA_ICTX]
#else /* !TSBMISS_ALIGN_ADDR */
ldxa [%g3]ASI_IMMU, %g3 /* tag target */
stx %g3, [%g2 + MMU_FAULT_AREA_ICTX]
stx %g4, [%g2 + MMU_FAULT_AREA_IADDR]
#endif /* !TSBMISS_ALIGN_ADDR */
stx %g4, [%g2 + MMU_FAULT_AREA_IFT]
wrpr %g0, TT_IMMU_MISS, %tt
add %g3, (TT_IMMU_MISS << TT_OFFSET_SHIFT), %g3
SET_SIZE(immu_miss_common)
SET_SIZE(immu_miss_ctxnon0)
!
! DTLB miss: search the guest's permanent (locked) mapping table
! under the per-guest spinlock before falling back to the normal
! TSB-driven miss path (dmmu_miss_common).  Mirrors the ITLB
! permanent-mapping search above.
!
/* %g1 contains per CPU area */
/* %g3 contains dmmu tag target */
VCPU2GUEST_STRUCT(%g1, %g6)
! take the guest's permanent-mappings spinlock
add %g6, GUEST_PERM_MAPPINGS_LOCK, %g2
SPINLOCK_ENTER(%g2, %g3, %g4)
* Look for a possible miss on a permanent entry.
* Note that the permanent mapping can have one of
* valid - TTE.V != 0. This is a valid mapping, check for
* a match. If not a match, continue the search
* with the next permanent mapping from the array.
* If it is a match, we have a hit, update the TLB
* invalid - TTE != 0 && TTE.V == 0. This is a TTE which has
* been used for a permanent mapping but has been
* subsequently unmapped, setting the TTE.V bit to 0.
* This is not a match, continue the search
* with the next permanent mapping from the array.
* invalid - TTE == 0 && TTE.V == 0. This is a TTE which is
* still uninitialised and has never been used for a
* permanent mapping. This means that the other
* entries in the permanent mapping array are also
* unused (as we always use the first available
* permanent mapping array element for a mapping) so
* we can stop searching for a permanent mapping now,
! %g2 = base of the permanent mapping array,
! %g3 = offset of the last entry (search runs downwards)
add %g6, GUEST_PERM_MAPPINGS, %g2
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
* for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
* uninitialised, no more mappings, miss;
* if (!table[i]->tte.v) {
* initialised but invalid, get next, continue;
* (valid TTE, check for hit)
* shift = TTE_PAGE_SHIFT(table[i]->tte);
* if ((table[i]->va >> shift) == (va >> shift)) {
!! %g2 = permanent mapping table base address
!! %g3 = current offset into table
* uninitialised, no more mappings, miss;
* initialised but invalid, get next, continue;
! classify the entry by its raw TTE value (V is bit 63, so a
! negative TTE means valid)
ldx [%g5 + MAPPING_TTE], %g6
brlz,pt %g6, 1f ! TTE.V == 1
brz,pt %g6, .dpmap_miss ! TTE == 0
ba,pt %xcc, .dpmap_continue ! TTE != 0 && TTE.V == 0
deccc GUEST_PERM_MAPPINGS_INCR, %g3
* shift = TTE_PAGE_SHIFT(m->tte);
* if ((m->va >> shift) == (va >> shift)) {
! compute the page shift for this TTE's page size
TTE_SHIFT_NOCHECK(%g6, %g7, %g4)
ldx [%g5 + MAPPING_VA], %g6
ldx [%g5 + MAPPING_TTE], %g5
! step to the previous entry; loop while offset >= 0
deccc GUEST_PERM_MAPPINGS_INCR, %g3
bgeu,pt %xcc, .dpmap_loop
ba,a,pt %xcc, .dpmap_miss
! hit: insert the matching permanent TTE into the DTLB
!! %g5 = tte (with pa) of matching entry
stxa %g5, [%g0]ASI_DTLB_DATA_IN
! NOTE(review): lock-address computation for the spinlock release;
! the SPINLOCK_EXIT sequence is not visible in this excerpt
inc GUEST_PERM_MAPPINGS_LOCK, %g6
VCPU_GUEST_STRUCT(%g1, %g6)
inc GUEST_PERM_MAPPINGS_LOCK, %g6
! miss in the permanent-mapping table: re-fetch the tag target in
! the delay slot and take the normal TSB miss path
ba,pt %xcc, dmmu_miss_common
ldxa [%g0]ASI_DMMU, %g3 /* tag target */
!
! Common DTLB miss path: decide between the fast miss trap
! (guest has TSBs configured for this context class) and the slow
! miss trap, record the fault address/context in the guest's
! MMU fault area, and revector the trap to the guest.
! NOTE(review): several instructions (delay slots, compares feeding
! the conditional branches) are not visible in this excerpt.
!
/* %g1 contains per CPU area */
/* %g3 contains dmmu tag target */
ENTRY_NP(dmmu_miss_common)
ALTENTRY(dmmu_miss_ctxnon0)
bgu,pn %xcc, watchdog_guest /* enforce %gl <= MAXPGL */
ldx [%g1 + CPU_MMU_AREA], %g2
brz,pn %g2, watchdog_guest /* enforce CPU_MMU_AREA != 0 */
srlx %g3, TAGTRG_CTX_RSHIFT, %g4 /* ctx from tag target */
/* if ctx == 0 and ctx0 set TSBs used, take slow trap */
/* if ctx != 0 and ctxnon0 set TSBs used, take slow trap */
movrz %g4, CPU_NTSBS_CTX0, %g7
/* update MMU_FAULT_AREA_DATA */
ldxa [%g3]ASI_DMMU, %g3 /* tag access */
stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
/* fast misses do not update MMU_FAULT_AREA_DFT with MMU_FT_FASTMISS */
wrpr %g0, TT_FAST_DMMU_MISS, %tt
add %g3, (TT_FAST_DMMU_MISS << TT_OFFSET_SHIFT), %g3
cmp %g2, 1 /* trap happened at TL=0 */
TRAP_GUEST(%g3, %g1, %g2)
! slow miss variant: also records the fault type (DFT) below
/* update MMU_FAULT_AREA_DATA */
ldxa [%g3]ASI_DMMU, %g3 /* tag access */
stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
stx %g4, [%g2 + MMU_FAULT_AREA_DFT]
wrpr %g0, TT_DMMU_MISS, %tt
add %g3, (TT_DMMU_MISS << TT_OFFSET_SHIFT), %g3
SET_SIZE(dmmu_miss_common)
SET_SIZE(dmmu_miss_ctxnon0)
!
! Fragment: fast data protection fault.  Records the fault
! address/context in the guest's MMU fault area and revectors the
! fast protection trap to the guest.
!
/* %g2 contains guest's miss info pointer (hv phys addr) */
* Update MMU_FAULT_AREA_DATA
ldxa [%g3]ASI_DMMU, %g3 /* tag access */
stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
/* fast misses do not update MMU_FAULT_AREA_DFT with MMU_FT_FASTPROT */
wrpr %g0, TT_FAST_DMMU_PROT, %tt /* already set? XXXQ */
add %g3, (TT_FAST_DMMU_PROT << TT_OFFSET_SHIFT), %g3
cmp %g2, 1 /* trap happened at tl=0 */
bgu,pn %xcc, watchdog_guest
TRAP_GUEST(%g3, %g1, %g2)
* set all TSB base registers to dummy
* call sequence and store a copy in
* the strand's mini-RA save area (NOTE(review): sentence truncated
* in this excerpt).
!
! Point the (four) context-0 TSB base registers at the dummy TSB so
! that hardware tablewalks on an unconfigured context hit a known
! harmless TSB.  NOTE(review): the instructions that set up %g2
! (ASI_MMU_TSB register addresses) and %g3 are not visible here.
!
ENTRY_NP(set_dummytsb_ctx0)
ldx [%g1 + CONFIG_DUMMYTSB], %g1
stxa %g1, [%g2]ASI_MMU_TSB
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 1)]
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 2)]
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 3)]
SET_SIZE(set_dummytsb_ctx0)
!
! Point the (four) context-nonzero TSB base registers at the dummy
! TSB and record copies in the strand save area (slots 4-7, paired
! with slots 1-3 used by set_dummytsb_ctx0 above).
! NOTE(review): the instructions that set up %g2 (ASI_MMU_TSB
! register addresses) and %g3 are not visible in this excerpt.
!
ENTRY_NP(set_dummytsb_ctxN)
ldx [%g1 + CONFIG_DUMMYTSB], %g1
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 4)]
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 5)]
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 6)]
stxa %g1, [%g2]ASI_MMU_TSB
stx %g1, [%g3 + (STRAND_MRA_INCR * 7)]
SET_SIZE(set_dummytsb_ctxN)
* Initialize hardware tablewalk configuration registers.
* If no value has been set in the MD, the default is to set
* the HWTW mode to "predict" (NOTE(review): sentence truncated
* in this excerpt).
!
! Clamp the MD-supplied HWTW mode into range: a negative value
! (unset) or a value above HWTW_PREDICT_MODE is replaced with the
! default predict mode.  Then disable all four RA->PA real
! range/offset register pairs (a zero value clears the enable bit).
!
ldx [%g1 + CONFIG_SYS_HWTW_MODE], %g1
movrlz %g1, HWTW_PREDICT_MODE, %g1 ! unset (< 0) -> default mode
cmp %g1, HWTW_PREDICT_MODE
movg %xcc, HWTW_PREDICT_MODE, %g1 ! too large -> default mode
stxa %g1, [%g2]ASI_MMU_CFG
! clear (disable) MMU real range registers 0-3
mov MMU_REAL_RANGE_0, %g1
stxa %g0, [%g1]ASI_MMU_HWTW
mov MMU_REAL_RANGE_1, %g1
stxa %g0, [%g1]ASI_MMU_HWTW
mov MMU_REAL_RANGE_2, %g1
stxa %g0, [%g1]ASI_MMU_HWTW
mov MMU_REAL_RANGE_3, %g1
stxa %g0, [%g1]ASI_MMU_HWTW
* %g1 - contains the Data Fault Type
!
! Common DMMU error path: record fault type, address, and context in
! the guest's MMU fault area, then revector the trap to the guest.
! %g2 = fault address, %g3 = fault context on entry.
! NOTE(review): instructions between the visible lines (e.g. the
! load of %g4 and the compare feeding bgu) are not shown here.
!
ALTENTRY(dmmu_err_common)
ldx [%g4 + CPU_MMU_AREA], %g4
brz,pn %g4, watchdog_guest ! no fault area: unrecoverable
stx %g1, [%g4 + MMU_FAULT_AREA_DFT]
stx %g2, [%g4 + MMU_FAULT_AREA_DADDR]
stx %g3, [%g4 + MMU_FAULT_AREA_DCTX]
! convert the trap type into a trap-table offset
sllx %g2, TT_OFFSET_SHIFT, %g2
bgu,pn %xcc, watchdog_guest
TRAP_GUEST(%g1, %g2, %g3)
SET_SIZE(dmmu_err_common)
* %g1 - contains the Instruction Fault Type
!
! Common IMMU error path: record fault type, address, and context in
! the guest's MMU fault area, then revector the trap to the guest.
! %g2 = fault address, %g3 = fault context on entry.
! NOTE(review): instructions between the visible lines (e.g. the
! load of %g4 and the compare feeding bgu) are not shown here.
!
ALTENTRY(immu_err_common)
ldx [%g4 + CPU_MMU_AREA], %g4
brz,pn %g4, watchdog_guest ! no fault area: unrecoverable
stx %g1, [%g4 + MMU_FAULT_AREA_IFT]
stx %g2, [%g4 + MMU_FAULT_AREA_IADDR]
stx %g3, [%g4 + MMU_FAULT_AREA_ICTX]
! convert the trap type into a trap-table offset
sllx %g2, TT_OFFSET_SHIFT, %g2
bgu,pn %xcc, watchdog_guest
TRAP_GUEST(%g1, %g2, %g3)
SET_SIZE(immu_err_common)
* instruction_invalid_TSB_entry trap
* Find the RA for the VA from the Tag Access register.
* Get the PA of the TTE from each D-TSB pointer register.
* Read the TTE Tag/data from that PA and check whether the
* tag matches. If we have a match, get the RA from the TTE data.
* N2 HWTW puts the PA of the four TSB entries it checked into
* the MMU I/D-TSB Pointer registers.
! extract the context from the tag target (%g2)
srlx %g2, TAGTRG_CTX_RSHIFT, %g4
!
! First search loop: walk the I-TSB pointer/config register pairs
! looking for a valid TTE whose tag matches the full tag target.
! NOTE(review): loop labels and delay slots between the visible
! lines are not shown in this excerpt.
!
!! %g1 TSB Config Register
!! %g3 TSB Pointer Register
ldxa [%g1]ASI_MMU_TSB, %g5 ! %g5 TSB Config
ldxa [%g3]ASI_MMU_TSB, %g6 ! %g6 PA of I-TSB entry
! load the TTE tag/data from the TSB
ldda [%g6]ASI_QUAD_LDD, %g4 ! %g4 tag, %g5 data
brgez,pn %g5, 1f ! check TTE.data.v bit 63,
! if not set, TTE invalid
cmp %g4, %g2 ! TTE.Tag == Tag Target ?
be,pn %xcc, .itsb_err_RA_found
! get next TSB pointer and configuration register
inc 8, %g1 ! TSB Config + 8
inc 8, %g3 ! ITSB_PTR VA + 8
! no TTE found for this VA. That must mean it got evicted from
! the TSB meanwhile (NOTE(review): sentence truncated here)
!
! Second search loop: like the first, but honours the TSB config's
! use-context-0/1 bits by masking the context out of the tag target
! before comparing.
!
!! %g1 TSB Config Register
!! %g3 TSB Pointer Register
ldxa [%g1]ASI_MMU_TSB, %g7 ! %g7 TSB Config
ldxa [%g3]ASI_MMU_TSB, %g6 ! %g6 PA of I-TSB entry
! load the TTE tag/data from the TSB
ldda [%g6]ASI_QUAD_LDD, %g4 ! %g4 tag, %g5 data
brgez,pn %g5, 1f ! check TTE.data.v bit 63,
! if not set, TTE invalid
* Check whether "use-context-0" or "use-context-1" is in effect
* if so, ignore the context when checking for a tag match.
srlx %g7, TSB_CFG_USE_CTX1_SHIFT, %g7
and %g7, (USE_TSB_PRIMARY_CTX | USE_TSB_SECONDARY_CTX), %g7
sllx %g2, TAGTRG_VA_LSHIFT, %g6 ! clear [63:42] of Tag Target
srlx %g6, TAGTRG_VA_LSHIFT, %g6 ! (context)
movrz %g7, %g2, %g6 ! go with masked Tag Target?
cmp %g4, %g6 ! TTE.tag == Tag Target ?
be,pn %xcc, .itsb_err_RA_found
! get next TSB pointer and configuration register
inc 8, %g1 ! TSB Config + 8
inc 8, %g3 ! ITSB_PTR + 8
! no TTE found for this VA. That must mean it got evicted from
! the TSB meanwhile (NOTE(review): sentence truncated here)
! found the TSB entry for the VA
! %g5 TTE.data, RA is bits[55:13]
sllx %g5, 13 + 63 - 55, %g5 ! strip bits above 55
srlx %g5, 63 - 55, %g2 ! RA -> %g2
! any bits above RA_55_40 set means the RA is out of range
srlx %g2, RA_55_40_SHIFT, %g3
brnz,pn %g3, .itsb_invalid_ra_err
* Find the guest memory segment that contains this RA
* If this RA is not allocated to the guest, revector to the
* guests trap handler. Note that this can be either a
RA_GET_SEGMENT(%g5, %g2, %g3, %g4)
* If we have a valid segment for this RA, set up the RA->PA
* translation in the MMU HWTW range/offset registers
brnz,pn %g3, .tsb_err_check_hwtw_regs
! error path: no segment for this RA — report invalid RA to guest
* No valid guest memory segment for this RA -or-
sllx %g2, TT_OFFSET_SHIFT, %g2
bgu,pn %xcc, watchdog_guest
ldx [%g3 + CPU_MMU_AREA], %g3
brz,pn %g3, watchdog_guest ! Nothing we can do about this
stx %g4, [%g3 + MMU_FAULT_AREA_IADDR]
stx %g5, [%g3 + MMU_FAULT_AREA_ICTX]
mov MMU_FT_INVALIDRA, %g6
stx %g6, [%g3 + MMU_FAULT_AREA_IFT]
TRAP_GUEST(%g1, %g2, %g3)
* data_invalid_TSB_entry trap
* Find the RA for the VA from the Tag Access register.
* Get the PA of the TTE from each D-TSB pointer register.
* Read the TTE Tag/data from that PA and check whether the
* tag matches. If we have a match, get the RA from the TTE data.
* N2 HWTW puts the PA of the four TSB entries it checked into
* the MMU I/D-TSB Pointer registers.
! extract the context from the tag target (%g2)
srlx %g2, TAGTRG_CTX_RSHIFT, %g4
!
! First search loop: walk the D-TSB pointer/config register pairs
! looking for a valid TTE whose tag matches the full tag target.
! NOTE(review): loop labels and delay slots between the visible
! lines are not shown in this excerpt.
!
!! %g1 TSB Config Register
!! %g3 TSB Pointer Register
ldxa [%g1]ASI_MMU_TSB, %g5 ! %g5 TSB Config
ldxa [%g3]ASI_MMU_TSB, %g6 ! %g6 PA of D-TSB entry
! load the TTE tag/data from the TSB
ldda [%g6]ASI_QUAD_LDD, %g4 ! %g4 tag, %g5 data
brgez,pn %g5, 1f ! check TTE.data.v bit 63,
! if not set, TTE invalid
cmp %g4, %g2 ! TTE.Tag == Tag Target ?
be,pn %xcc, .dtsb_err_RA_found
! get next TSB pointer and configuration register
inc 8, %g1 ! TSB Config + 8
inc 8, %g3 ! DTSB_PTR VA + 8
! no TTE found for this VA. That must mean it got evicted from
! the TSB meanwhile (NOTE(review): sentence truncated here)
!
! Second search loop: like the first, but honours the TSB config's
! use-context-0/1 bits by masking the context out of the tag target
! before comparing.
!
!! %g1 TSB Config Register
!! %g3 TSB Pointer Register
ldxa [%g1]ASI_MMU_TSB, %g7 ! %g7 TSB Config
ldxa [%g3]ASI_MMU_TSB, %g6 ! %g6 PA of D-TSB entry
! load the TTE tag/data from the TSB
ldda [%g6]ASI_QUAD_LDD, %g4 ! %g4 tag, %g5 data
brgez,pn %g5, 1f ! check TTE.data.v bit 63,
! if not set, TTE invalid
* Check whether "use-context-0" or "use-context-1" is in effect
* if so, ignore the context when checking for a tag match.
srlx %g7, TSB_CFG_USE_CTX1_SHIFT, %g7
and %g7, (USE_TSB_PRIMARY_CTX | USE_TSB_SECONDARY_CTX), %g7
sllx %g2, TAGTRG_VA_LSHIFT, %g6 ! clear [63:42] of Tag Target
srlx %g6, TAGTRG_VA_LSHIFT, %g6 ! (context)
movrz %g7, %g2, %g6 ! go with masked Tag Target?
cmp %g4, %g6 ! TTE.tag == Tag Target ?
be,pn %xcc, .dtsb_err_RA_found
! get next TSB pointer and configuration register
inc 8, %g1 ! TSB Config + 8
inc 8, %g3 ! DTSB_PTR VA + 8
! no TTE found for this VA. That must mean it got evicted from
! the TSB meanwhile (NOTE(review): sentence truncated here)
! found the TSB entry for the VA
! %g5 TTE.data, RA is bits[55:13]
sllx %g5, 13 + 63 - 55, %g5 ! strip bits above 55
srlx %g5, 63 - 55, %g2 ! RA -> %g2
! any bits above RA_55_40 set means the RA is out of range
srlx %g2, RA_55_40_SHIFT, %g3
brnz,a,pn %g3, .dtsb_invalid_ra_err
* Find the guest memory segment that contains this RA
* If this RA is not allocated to the guest, revector to the
* guests trap handler. Note that this can be either a
RA_GET_SEGMENT(%g5, %g2, %g3, %g4)
brz,pn %g3, .dtsb_invalid_ra_err
* We have a valid guest memory segment for this RA. Use this
* to populate one of the MMU Real Range/Physical Offset registers
* Find the first disabled Real range/offset registers. If all are
* enabled, disable all four range/offset pairs and start again
!
! Find a free MMU HWTW Real Range/Physical Offset register pair for
! the guest segment in %g3 and program it with the segment's
! base/limit/offset; if all four pairs are enabled, disable them all
! and reuse pair 3.  On success the hardware tablewalk can translate
! the TSB TTE's RA to a PA, so the faulting access is re-executed.
!
.tsb_err_check_hwtw_regs:
! %g3 guest memory segment
mov MMU_REAL_RANGE_0, %g4
ldxa [%g4]ASI_MMU_HWTW, %g5
brgez,pn %g5, .tsb_err_ra_hwtw_insert ! enable bit (63) not set -> free
mov MMU_REAL_RANGE_1, %g4
ldxa [%g4]ASI_MMU_HWTW, %g5
brgez,pn %g5, .tsb_err_ra_hwtw_insert ! enable bit (63) not set -> free
mov MMU_REAL_RANGE_2, %g4
ldxa [%g4]ASI_MMU_HWTW, %g5
brgez,pn %g5, .tsb_err_ra_hwtw_insert ! enable bit (63) not set -> free
mov MMU_REAL_RANGE_3, %g4
ldxa [%g4]ASI_MMU_HWTW, %g5
brgez,pn %g5, .tsb_err_ra_hwtw_insert ! enable bit (63) not set -> free
! all the HWTW range/offsets in use, disable them all
mov MMU_REAL_RANGE_0, %g4
stxa %g0, [%g4]ASI_MMU_HWTW
mov MMU_REAL_RANGE_1, %g4
stxa %g0, [%g4]ASI_MMU_HWTW
mov MMU_REAL_RANGE_2, %g4
stxa %g0, [%g4]ASI_MMU_HWTW
mov MMU_REAL_RANGE_3, %g4
stxa %g0, [%g4]ASI_MMU_HWTW
! fall through, leave range/offset 0/1/2 for next time to save
! rescanning (pair 3 is reused immediately below)
* Insert the base/limit/offset from the guest memory segment into
* the MMU Real Range/Physical Offset registers.
* Note that the base/limit/offset are >> 13 for the MMU HWTW registers
* %g3 guest memory segment
* %g4 VA of ASI_MMU_HWTW of REAL_RANGE
* %g5 VA of ASI_MMU_HWTW of PHYS_OFFSET
sllx %g2, 63, %g2 ! MMU Real Range enable bit[63]
ldx [%g3 + RA2PA_SEGMENT_LIMIT], %g6
sllx %g6, REALRANGE_BOUNDS_SHIFT, %g6 ! MMU Real Range limit bits[53:27]
ldx [%g3 + RA2PA_SEGMENT_BASE], %g6
sllx %g6, REALRANGE_BASE_SHIFT, %g6 ! MMU Real Range base bits[26:0]
stxa %g2, [%g4]ASI_MMU_HWTW ! MMU Real Range
ldx [%g3 + RA2PA_SEGMENT_OFFSET], %g6
sllx %g6, PHYSOFF_SHIFT, %g6
stxa %g6, [%g5]ASI_MMU_HWTW ! MMU Physical Offset
* Now we have a valid RA->PA translation ready for the VA, the HWTW
* TSB TTE RA->PA translation will succeed so we just re-execute
! error path: no segment for this RA — report invalid RA to guest
* No valid guest memory segment for this RA -or-
sllx %g2, TT_OFFSET_SHIFT, %g2
bgu,pn %xcc, watchdog_guest
ldx [%g3 + CPU_MMU_AREA], %g3
brz,pn %g3, watchdog_guest ! Nothing we can do about this
stx %g0, [%g3 + MMU_FAULT_AREA_DADDR] /* XXX */
stx %g5, [%g3 + MMU_FAULT_AREA_DCTX]
mov MMU_FT_INVALIDRA, %g6
stx %g6, [%g3 + MMU_FAULT_AREA_DFT]
TRAP_GUEST(%g1, %g2, %g3)