* ========== Copyright Header Begin ==========================================
* Hypervisor Software File: hcall_mmu.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* - Do not alter or remove copyright notices.
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* conditions are met:
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems, Inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* any nuclear facility.
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
.ident "@(#)hcall_mmu.s 1.99 07/06/20 SMI"
#include <sys/asm_linkage.h>
ENTRY_NP(hcall_mmu_tsb_ctx0)
VCPU_GUEST_STRUCT(%g5, %g6)
/* set cpu->ntsbs to zero now in case we take an error exit */
stx %g0, [%g5 + CPU_NTSBS_CTX0]
/* Also zero out H/W bases */
btst TSBD_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
sllx %o0, TSBD_SHIFT, %g3
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
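/*
 * Informal restatement of the argument checks above (C-like sketch;
 * ra_range_valid() is a hypothetical stand-in for the RA2PA range
 * macro, not a hypervisor routine):
 *
 *	%o0 = ntsbs, %o1 = real address of the guest's TSBD array
 *	if (tsbd_ra & (TSBD_ALIGNMENT - 1))
 *		goto herr_badalign;
 *	if (!ra_range_valid(guest, tsbd_ra, ntsbs << TSBD_SHIFT))
 *		goto herr_noraddr;
 */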
add %g5, CPU_TSBDS_CTX0, %g2
/* loop over each TSBD and validate */
add %g5, CPU_TSBDS_CTX0, %g2
/* check pagesize - accept only valid encodings */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
bgeu,pn %xcc, herr_badpgsz
btst TTE_VALIDSIZEARRAY, %g3
/* check that pageszidx is set in pageszmask */
lduw [%g2 + TSBD_PGSZS_OFF], %g4
/* check that pageszidx is lowest-order bit of pageszmask */
/* check associativity - only support 1-way */
lduh [%g2 + TSBD_ASSOC_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g3
sll %g4, TSB_MAX_SZCODE, %g4
/* check context index field - must be -1 (shared) or zero */
ld [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, TSBD_CTX_IDX_SHARE
brnz,pn %g3, herr_inval ! only one set of context regs
/* check reserved field - must be zero for now */
ldx [%g2 + TSBD_RSVD_OFF], %g3
/* check TSB base real address */
ldx [%g2 + TSBD_BASE_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g4
sllx %g4, TSBE_SHIFT, %g4
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
add %g5, CPU_TSBDS_CTX0, %g2
/* range OK, check alignment */
bnz,pn %xcc, herr_badalign
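/*
 * Summary of the per-TSBD validation loop above, as C-like pseudocode
 * (a restatement of the checks named in the comments, not the literal
 * code):
 *
 *	for (i = 0; i < ntsbs; i++) {
 *		tsbd_t *t = &tsbds[i];
 *		check t->idxpgsz is a valid page-size encoding;
 *		check bit t->idxpgsz is set in t->pgszs and is the
 *		    lowest-order bit of that mask;
 *		check t->assoc == 1 (only 1-way is supported);
 *		check t->size is a supported TSB size;
 *		check t->ctx_index is 0 or TSBD_CTX_IDX_SHARE;
 *		check t->rsvd == 0;
 *		check [t->base, t->base + (t->size << TSBE_SHIFT)) is a
 *		    valid, suitably aligned guest real-address range;
 *	}
 */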
/* now setup H/W TSB regs */
/* only look at first two TSBDs for now */
add %g5, CPU_TSBDS_CTX0, %g2
ldx [%g2 + TSBD_BASE_OFF], %g1
RA2PA_CONV(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS0
stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS0
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 only)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 only)
/* process second TSBD, if available */
add %g2, TSBD_BYTES, %g2 ! move to next TSBD
ldx [%g2 + TSBD_BASE_OFF], %g1
RA2PA_CONV(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS1
stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS1
/* %g3 still has old CONFIG value. */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 + PS1)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 + PS1)
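/*
 * Informal summary of the H/W register values built above, as implied
 * by the code (exact bit layouts are chip specific):
 *  - the DTSBBASE/ITSBBASE registers hold the TSB base physical
 *    address with the TSB size code built up in the low bits (the
 *    "increment TSB size field" adds),
 *  - the DTSB_CONFIG/ITSB_CONFIG registers hold the PS0 page-size
 *    index in the low field and, when a second TSBD is supplied, the
 *    PS1 index shifted up by ASI_TSB_CONFIG_PS1_SHIFT and OR-ed in.
 */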
stx %o0, [%g5 + CPU_NTSBS_CTX0]
clr %o1 ! no return value
SET_SIZE(hcall_mmu_tsb_ctx0)
ENTRY_NP(hcall_mmu_tsb_ctxnon0)
VCPU_GUEST_STRUCT(%g5, %g6)
/* set cpu->ntsbs to zero now in case we take an error exit */
stx %g0, [%g5 + CPU_NTSBS_CTXN]
/* Also zero out H/W bases */
btst TSBD_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
sllx %o0, TSBD_SHIFT, %g3
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
/* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
add %g5, CPU_TSBDS_CTXN, %g2
/* loop over each TSBD and validate */
add %g5, CPU_TSBDS_CTXN, %g2
/* check pagesize - accept only valid encodings */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
bgeu,pn %xcc, herr_badpgsz
btst TTE_VALIDSIZEARRAY, %g3
/* check that pageszidx is set in pageszmask */
lduw [%g2 + TSBD_PGSZS_OFF], %g4
/* check that pageszidx is lowest-order bit of pageszmask */
/* check associativity - only support 1-way */
lduh [%g2 + TSBD_ASSOC_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g3
sll %g4, TSB_MAX_SZCODE, %g4
/* check context index field - must be -1 (shared) or zero */
ld [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, TSBD_CTX_IDX_SHARE
brnz,pn %g3, herr_inval ! only one set of context regs
/* check reserved field - must be zero for now */
ldx [%g2 + TSBD_RSVD_OFF], %g3
/* check TSB base real address */
ldx [%g2 + TSBD_BASE_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g4
sllx %g4, TSBE_SHIFT, %g4
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
add %g5, CPU_TSBDS_CTXN, %g2
/* range OK, check alignment */
bnz,pn %xcc, herr_badalign
/* now setup H/W TSB regs */
/* only look at first two TSBDs for now */
add %g5, CPU_TSBDS_CTXN, %g2
ldx [%g2 + TSBD_BASE_OFF], %g1
RA2PA_CONV(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS0
stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS0
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 only)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 only)
/* process second TSBD, if available */
add %g2, TSBD_BYTES, %g2 ! move to next TSBD
ldx [%g2 + TSBD_BASE_OFF], %g1
RA2PA_CONV(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS1
stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS1
/* %g3 still has old CONFIG value. */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 + PS1)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 + PS1)
stx %o0, [%g5 + CPU_NTSBS_CTXN]
clr %o1 ! no return value
SET_SIZE(hcall_mmu_tsb_ctxnon0)
ENTRY_NP(hcall_mmu_tsb_ctx0_info)
VCPU_GUEST_STRUCT(%g5, %g6)
! actual ntsbs always returned in %o1, so save tsbs now
! Check to see if ntsbs fits into the supplied buffer
ldx [%g5 + CPU_NTSBS_CTX0], %o1
btst TSBD_ALIGNMENT - 1, %g4
bnz,pn %xcc, herr_badalign
sllx %o1, TSBD_SHIFT, %g3
! %g3 = size of the TSBD list in bytes
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
add %g5, CPU_TSBDS_CTX0, %g1
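! Informal sketch of this info call (roles inferred from the comments
! above; xcopy() stands for the hypervisor copy routine):
!
!	ntsbs = cpu->ntsbs_ctx0;	always returned in %o1
!	check the buffer RA is TSBD-aligned and a valid guest range,
!	    and that ntsbs descriptors fit in it;
!	xcopy(cpu->tsbds_ctx0, buffer, ntsbs * TSBD_BYTES);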
SET_SIZE(hcall_mmu_tsb_ctx0_info)
ENTRY_NP(hcall_mmu_tsb_ctxnon0_info)
VCPU_GUEST_STRUCT(%g5, %g6)
! actual ntsbs always returned in %o1, so save tsbs now
! Check to see if ntsbs fits into the supplied buffer
ldx [%g5 + CPU_NTSBS_CTXN], %o1
btst TSBD_ALIGNMENT - 1, %g4
bnz,pn %xcc, herr_badalign
sllx %o1, TSBD_SHIFT, %g3
! %g3 = size of the TSBD list in bytes
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
add %g5, CPU_TSBDS_CTXN, %g1
SET_SIZE(hcall_mmu_tsb_ctxnon0_info)
* mmu_map_addr - stuff ttes directly into the tlbs
ENTRY_NP(hcall_mmu_map_addr)
PRINT("mmu_map_addr: va=0x")
VCPU_GUEST_STRUCT(%g1, %g6)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o3, herr_inval)
TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
sub %g4, 1, %g5 ! %g5 page mask
srlx %g2, 64 - 40 + 13, %g2
sllx %g2, 13, %g2 ! %g2 real address
xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
/* FIXME: This eventually needs to also cover the IO
 * address ranges and the TTE flags as appropriate
RA2PA_RANGE_CONV(%g6, %g2, %g4, 3f, %g5, %g7)
4: or %g3, %g2, %g1 ! %g1 new tte with pa
or %o0, %o1, %g2 ! %g2 tag
mov MMU_TAG_ACCESS, %g3 ! %g3 tag_access
CLEAR_TTE_LOCK_BIT(%g1, %g4)
set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection
stxa %g1, [%g5]ASI_DTLB_DATA_IN
! condition codes still set
stxa %g1, [%g5]ASI_ITLB_DATA_IN
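/*
 * Informal sketch of the TLB insert above: the guest TTE carries a
 * real address, which is replaced with the corresponding physical
 * address before the TTE is handed to hardware (names below are
 * descriptive only):
 *
 *	ra = RA field extracted from the TTE (the srlx/sllx pair);
 *	pa = ra2pa(guest, ra);			or fail herr_noraddr
 *	hw_tte = (tte with RA field zeroed) | pa, lock bit cleared;
 *	write (va | ctx) to the MMU tag-access register, then store
 *	    hw_tte via the D- and/or I-TLB data-in ASI, as requested
 *	    by the flags argument.
 */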
RANGE_CHECK_IO(%g6, %g2, %g4, .hcall_mmu_map_addr_io_found,
.hcall_mmu_map_addr_io_not_found, %g1, %g5)
.hcall_mmu_map_addr_io_found:
.hcall_mmu_map_addr_io_not_found:
! %g3 = TTE without PA/RA field
! FIXME: This test is to be subsumed when we fix the RA mappings
set GUEST_LDC_MAPIN_BASERA, %g7
bneg,pn %xcc, herr_noraddr
set GUEST_LDC_MAPIN_SIZE, %g7
! check regs passed in to mapin_ra:
bneg,pt %xcc, ldc_map_addr_api
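! Informal restatement of the fallback above: if the RA is not backed
! by guest memory, the two bounds checks decide whether it falls in
! the LDC map-in window before giving up:
!
!	if (ra < GUEST_LDC_MAPIN_BASERA)
!		goto herr_noraddr;
!	if (ra - GUEST_LDC_MAPIN_BASERA < GUEST_LDC_MAPIN_SIZE)
!		handle via ldc_map_addr_api;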
ENTRY_NP(hcall_mmu_map_addr_ra_not_found)
SET_SIZE(hcall_mmu_map_addr)
ENTRY_NP(hcall_mmu_unmap_addr)
PRINT("mmu_unmap_addr: va=0x")
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o2, herr_inval)
set (NCTXS - 1), %g2 ! 8K page mask
ldxa [%g1]ASI_MMU, %g3 ! save current primary ctx
stxa %o1, [%g1]ASI_MMU ! switch to new ctx
stxa %g0, [%g2]ASI_IMMU_DEMAP
stxa %g0, [%g2]ASI_DMMU_DEMAP
2: stxa %g3, [%g1]ASI_MMU ! restore original primary ctx
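/*
 * The demap sequence above, restated (net effect only):
 *
 *	saved = primary context register (read via ASI_MMU);
 *	primary context = ctx argument;
 *	demap-page(va) in the I- and/or D-MMU, per the flags;
 *	primary context = saved;
 */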
SET_SIZE(hcall_mmu_unmap_addr)
* arg0/1 cpulist (%o0/%o1)
ENTRY_NP(hcall_mmu_demap_page)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_VA_CTX(%o2, %o3, herr_inval, %g2)
CHECK_MMU_FLAGS(%o4, herr_inval)
stxa %g0, [%g2]ASI_IMMU_DEMAP
stxa %g0, [%g2]ASI_DMMU_DEMAP
2: stxa %g3, [%g1]ASI_MMU ! restore primary ctx
SET_SIZE(hcall_mmu_demap_page)
* arg0/1 cpulist (%o0/%o1)
ENTRY_NP(hcall_mmu_demap_ctx)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_CTX(%o2, herr_inval, %g2)
CHECK_MMU_FLAGS(%o3, herr_inval)
set TLB_DEMAP_CTX_TYPE, %g3
stxa %g0, [%g3]ASI_IMMU_DEMAP
stxa %g0, [%g3]ASI_DMMU_DEMAP
2: stxa %g7, [%g2]ASI_MMU ! restore primary ctx
SET_SIZE(hcall_mmu_demap_ctx)
* arg0/1 cpulist (%o0/%o1)
ENTRY_NP(hcall_mmu_demap_all)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_MMU_FLAGS(%o2, herr_inval)
set TLB_DEMAP_ALL_TYPE, %g3
stxa %g0, [%g3]ASI_IMMU_DEMAP
stxa %g0, [%g3]ASI_DMMU_DEMAP
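/*
 * Note on the demap variants above: the address written to the
 * IMMU/DMMU demap ASIs carries the demap type (TLB_DEMAP_CTX_TYPE or
 * TLB_DEMAP_ALL_TYPE here), which selects demap-by-context versus
 * demap-all behavior in the MMU; the store data itself is ignored
 * (hence %g0).
 */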
SET_SIZE(hcall_mmu_demap_all)
* arg1 context (%o1) must be zero
ENTRY_NP(hcall_mmu_map_perm_addr)
PRINT("mmu_map_perm_addr: va=0x")
VCPU_GUEST_STRUCT(%g1, %g6)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o3, herr_inval)
! Fail if tte isn't valid
! Fail if flags indicate ITLB, but no execute perm
#if 1 /* FIXME: Hack for broken OBP */
TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
sub %g4, 1, %g5 ! %g5 page mask
! Fail if page-offset bits aren't zero
srlx %g2, 64 - 56 + 13, %g2
sllx %g2, 13, %g2 ! %g2 real address
xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
bne,pn %xcc, herr_inval ! if RA not page size aligned
andn %g2, %g5, %g2 ! Align RA to page size
! %g3 = TTE with RA field zeroed
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g2, %g4, herr_noraddr, %g5, %g7)
! Force clear TTE lock bit
CLEAR_TTE_LOCK_BIT(%o2, %g5)
sub %g4, 1, %g3 ! page mask
add %g6, GUEST_PERM_MAPPINGS_LOCK, %g7
SPINLOCK_ENTER(%g7, %g2, %g5)
/* Search for existing perm mapping */
add %g6, GUEST_PERM_MAPPINGS, %g1
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g6
! %g1 = perm mappings list
* Skim mapping entries for a potential conflict.
* NOTE: Start at the end of the array so we prefer to fill
* empty slots earlier on in the perm-mapping array.
* for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
*	if ((((addr & table[i].mask) ^ table[i].tag) & mask) == 0) {
*		matching entry ... write over it.
ldda [ %g1 + %g6 ] ASI_QUAD_LDD, %g4 ! Ld Tag (g4) + TTE (g5)
brgez,a,pn %g5, .pml_next_loop
mov %g6, %o1 ! del-slot executed if branch taken
and %g5, TTE_SZ_MASK, %g2
add %g2, %g7, %g7 ! Mult by 3
add %g7, 13, %g7 ! Add 13
sllx %g2, %g7, %g2 ! Shift to get bytes of page size
sub %g2, 1, %g2 ! Page mask for TTE retrieved
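! (For sun4v TTEs the page size is 8KB * 8^szcode, i.e.
!  2^(13 + 3*szcode) bytes, which is the shift count the
!  multiply-by-3 / add-13 arithmetic above builds.)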
andncc %g7, %g3, %g7 ! Check for tag match
bne,pt %xcc, .pml_next_loop
! Brute force demap both I & D TLBs:
! FIXME: really only need to do pages ...
ldxa [%g7]ASI_MMU, %g2 ! save current primary ctx
stxa %g0, [%g7]ASI_MMU ! switch to ctx0
stxa %g0, [%o0]ASI_IMMU_DEMAP
stxa %g0, [%o0]ASI_DMMU_DEMAP
stxa %g2, [%g7]ASI_MMU ! restore original primary ctx
sub %g6, MAPPING_SIZE, %g6
brgez,pt %g6, .perm_map_loop
add %g1, GUEST_PERM_MAPPINGS_LOCK, %g6
! %g1 = perm mappings list
! %g6 = offset of matching or free entry
membar #StoreStore | #LoadStore
! Now determine the offset and bit that needs setting for this vcpu
/* Calculate this cpu's cpuset mask */
ldub [%g1 + CPU_VID], %g2
and %g2, MAPPING_XWORD_MASK, %g3
srlx %g2, MAPPING_XWORD_SHIFT, %g2
sllx %g2, MAPPING_XWORD_BYTE_SHIFT_BITS, %g2
add %g6, %g2, %g2 ! Just add offset to this for the I or D cpuset arrays
beq,pn %xcc, .perm_map_testd
ldx [ %g2 + MAPPING_ICPUSET ], %g4
stx %g4, [ %g2 + MAPPING_ICPUSET ]
beq,pn %xcc, .perm_map_done
ldx [ %g2 + MAPPING_DCPUSET ], %g4
stx %g4, [ %g2 + MAPPING_DCPUSET ]
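! Informal sketch of the cpuset bookkeeping above: each permanent
! mapping records which vcpus hold it in the I- and D-TLBs as arrays
! of bitmask words ("xwords"). For this vcpu (flag names assumed from
! the sun4v MMU flags argument):
!
!	word = vid >> MAPPING_XWORD_SHIFT;
!	bit  = vid &  MAPPING_XWORD_MASK;
!	if (flags & MAP_ITLB) map->icpuset[word] |= 1 << bit;
!	if (flags & MAP_DTLB) map->dcpuset[word] |= 1 << bit;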
stx %o0, [ %g6 + MAPPING_VA ]
stx %o2, [ %g6 + MAPPING_TTE ] ! Finally store the TTE
membar #StoreStore | #StoreLoad
VCPU2GUEST_STRUCT(%g1, %g1)
add %g1, GUEST_PERM_MAPPINGS_LOCK, %g7
SET_SIZE(hcall_mmu_map_perm_addr)
* FIXME: Need to make this a subroutine call so it can
* be performed as part of the guest and CPU exit clean up.
ENTRY_NP(hcall_mmu_unmap_perm_addr)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o2, herr_inval)
add %g2, GUEST_PERM_MAPPINGS_LOCK, %g7
SPINLOCK_ENTER(%g7, %g3, %g5)
/* Search for existing perm mapping */
add %g2, GUEST_PERM_MAPPINGS, %g1
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g6
! %g1 = perm mappings list
* Skim mapping entries for a potential match.
* for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
*	if (((addr & table[i].mask) ^ table[i].tag) == 0) {
*		matching entry ... invalidate it.
ldda [ %g1 + %g6 ] ASI_QUAD_LDD, %g4 ! Ld Tag (g4) + TTE (g5)
brgez,a,pn %g5, .puml_next_loop
and %g5, TTE_SZ_MASK, %g5
add %g5, %g3, %g3 ! Multiply by 3
add %g3, 13, %g3 ! Add 13
sllx %g5, %g3, %g3 ! Shift to get bytes of page size
sub %g3, 1, %g3 ! Page mask for TTE retrieved
andncc %g5, %g3, %g0 ! Check for tag match
brgz,pt %g6, .perm_unmap_loop
sub %g6, MAPPING_SIZE, %g6
! Bail out: no match was found
add %g2, GUEST_PERM_MAPPINGS_LOCK, %g7
! %g1 = perm mappings list
! %g6 = offset of matching entry
! NOTE: We assume that the overlap match on insert is good enough that
! there can never be two or more matching entries in the mapping table
membar #StoreStore | #LoadStore
! Now determine the offset and bit that needs setting for this vcpu
! The remaining logic is as follows:
! For both the I & D cases determine if we need to clear the
! presence bits in the active cpusets, and perform a demap on the
! local CPU (always is the simplest case since the other strands
/* Calculate this cpu's cpuset mask */
ldub [%g1 + CPU_VID], %g3
and %g3, MAPPING_XWORD_MASK, %g5
srlx %g3, MAPPING_XWORD_SHIFT, %g5
sllx %g5, MAPPING_XWORD_BYTE_SHIFT_BITS, %g5
add %g6, %g5, %g5 ! Just add offset to this for the I or D cpuset arrays
! %g4 = xword bit for vcpu
! %g5 = cpu xword offset into permmap
beq,pn %xcc, .perm_umap_testd
ldx [ %g5 + MAPPING_ICPUSET ], %g7
stx %g7, [ %g5 + MAPPING_ICPUSET ]
ldxa [%g7]ASI_MMU, %o1 ! save current primary ctx
stxa %g0, [%g7]ASI_MMU ! switch to ctx0
stxa %g0, [%o0]ASI_IMMU_DEMAP
stxa %o1, [%g7]ASI_MMU ! restore original primary ctx
beq,pn %xcc, .perm_umap_finish
ldx [ %g5 + MAPPING_DCPUSET ], %g7
stx %g7, [ %g5 + MAPPING_DCPUSET ]
ldxa [%g7]ASI_MMU, %o1 ! save current primary ctx
stxa %g0, [%g7]ASI_MMU ! switch to ctx0
stxa %g0, [%o0]ASI_DMMU_DEMAP
stxa %o1, [%g7]ASI_MMU ! restore original primary ctx
! Final step... if all the CPU set entries are gone
! then clean out the mapping entry itself
mov (NVCPU_XWORDS-1)*MAPPING_XWORD_SIZE, %g4
ldx [ %g7 + MAPPING_ICPUSET ], %g5
ldx [ %g7 + MAPPING_DCPUSET ], %g7
! Bail out if we find a non-zero entry
bne,pn %xcc, .perm_umap_done
sub %g4, MAPPING_XWORD_SIZE, %g4
stx %g0, [ %g6 + MAPPING_TTE ] ! Invalidate first
stx %g0, [ %g6 + MAPPING_VA ] ! For sanity cleanse tag
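! Informal sketch of the cleanup above: after this vcpu's bits are
! cleared, scan every cpuset xword of the entry; if any bit is still
! set on the I- or D-side, another vcpu still uses the mapping and the
! entry is kept, otherwise it is invalidated (TTE first, then the tag):
!
!	for (i = NVCPU_XWORDS - 1; i >= 0; i--)
!		if (map->icpuset[i] | map->dcpuset[i])
!			goto done;		still in use
!	map->tte = 0;
!	map->va  = 0;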
membar #StoreStore | #StoreLoad
VCPU2GUEST_STRUCT(%g1, %g1)
add %g1, GUEST_PERM_MAPPINGS_LOCK, %g7
SET_SIZE(hcall_mmu_unmap_perm_addr)
ENTRY_NP(hcall_mmu_perm_addr_info)
SET_SIZE(hcall_mmu_perm_addr_info)
* arg0 mmustat buffer ra (%o0)
* ret1 old mmustat buffer ra (%o1)
ENTRY_NP(hcall_niagara_mmustat_conf)
btst MMUSTAT_AREA_ALIGN - 1, %o0 ! check alignment
bnz,pn %xcc, herr_badalign
VCPU_GUEST_STRUCT(%g1, %g4)
RA2PA_RANGE_CONV(%g4, %o0, MMUSTAT_AREA_SIZE, herr_noraddr, %g3, %g2)
ldx [%g1 + CPU_MMUSTAT_AREA_RA], %o1
stx %o0, [%g1 + CPU_MMUSTAT_AREA_RA]
stx %g2, [%g1 + CPU_MMUSTAT_AREA]
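! Informal sketch of mmustat_conf above: validate the new buffer RA,
! return the previous RA, and record both the RA and its translated PA
! for the per-vcpu MMU statistics area:
!
!	if (new_ra & (MMUSTAT_AREA_ALIGN - 1))	goto herr_badalign;
!	pa = ra2pa(guest, new_ra, MMUSTAT_AREA_SIZE);	else herr_noraddr
!	old = cpu->mmustat_area_ra;	returned in %o1
!	cpu->mmustat_area_ra = new_ra;
!	cpu->mmustat_area = pa;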
SET_SIZE(hcall_niagara_mmustat_conf)
* ret1 mmustat buffer ra (%o1)
ENTRY_NP(hcall_niagara_mmustat_info)
ldx [%g1 + CPU_MMUSTAT_AREA_RA], %o1
SET_SIZE(hcall_niagara_mmustat_info)