* ========== Copyright Header Begin ==========================================
* Hypervisor Software File: hcall_mmu.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* - Do not alter or remove copyright notices
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following conditions are met:
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems, Inc. nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
.ident "@(#)hcall_mmu.s 1.3 07/07/17 SMI"
#include <sys/asm_linkage.h>
/*
 * hcall_mmu_tsb_ctx0 - register the guest's context-0 TSB descriptor list
 * and program the hardware tablewalk (HWTW) TSB config registers.
 *
 * NOTE(review): register usage suggests %o0 = ntsbs and %o1 = RA of the
 * TSB-descriptor (TSBD) array, i.e. the sun4v MMU_TSB_CTX0 hcall --
 * confirm against the hypervisor API headers.  Several compares and local
 * labels referenced below (2f, .ctx0_tsbd_loop, .ctx0_tsbd_finish,
 * herr_*) are defined outside this excerpt.
 */
ENTRY_NP(hcall_mmu_tsb_ctx0)
VCPU_GUEST_STRUCT(%g5, %g6) ! %g5 = vcpu struct, %g6 = guest struct
/* set cpu->ntsbs to zero now in case we error exit */
stx %g0, [%g5 + CPU_NTSBS_CTX0]
/* Also zero out H/W bases */
HVCALL(set_dummytsb_ctx0) ! park H/W on dummy TSBs while reconfiguring
/* TSBD array RA must be TSBD_ALIGNMENT-aligned */
btst TSBD_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
sllx %o0, TSBD_SHIFT, %g3 ! (delay slot) %g3 = ntsbs * TSBD_BYTES
/* verify the entire TSBD array lies within the guest's RA range */
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
/* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
add %g5, CPU_TSBDS_CTX0, %g2 ! %g2 = dest: vcpu's cached ctx0 TSBDs
/* loop over each TSBD and validate */
add %g5, CPU_TSBDS_CTX0, %g2 ! %g2 = cursor into cached TSBDs
/* check pagesize - accept only valid encodings */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
bgeu,pn %xcc, herr_badpgsz ! cc set by a compare not visible in this excerpt
btst TTE_VALIDSIZEARRAY, %g3
/* check that pageszidx is set in pageszmask */
lduw [%g2 + TSBD_PGSZS_OFF], %g4
/* check that pageszidx is lowest-order bit of pageszmask */
/* check associativity - only support 1-way */
lduh [%g2 + TSBD_ASSOC_OFF], %g3
/* check TSB size: must be a power of two within the supported range */
lduw [%g2 + TSBD_SIZE_OFF], %g3
btst %g3, %g4 ! check for power-of-two
sll %g4, TSB_MAX_SZCODE, %g4
/* check context index field - must be -1 (shared) or zero/one */
lduw [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, TSBD_CTX_IDX_SHARE
be,pt %xcc, 2f ! -1 is OK
/* check reserved field - must be zero for now */
ldx [%g2 + TSBD_RSVD_OFF], %g3
/* check TSB base real address */
ldx [%g2 + TSBD_BASE_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g4
sllx %g4, TSBE_SHIFT, %g4 ! %g4 = TSB size in bytes (entries << TSBE_SHIFT)
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
add %g5, CPU_TSBDS_CTX0, %g2 ! reload cursor (%g2 clobbered by macro above)
/* range OK, check alignment */
bnz,pn %xcc, herr_badalign
/* now setup HWTW regs */
bgeu,pn %xcc, .ctx0_tsbd_finish ! exit loop once all TSBDs are programmed
add %g5, CPU_TSBDS_CTX0, %g2
sllx %g7, TSBD_SHIFT, %g1 ! %g1 = byte offset of TSBD index %g7
ldx [%g2 + TSBD_BASE_OFF], %g1
RA2PA_CONV(%g6, %g1, %g1, %g4) ! start with TSB base PA
lduw [%g2 + TSBD_SIZE_OFF], %g4
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
or %g1, %g4, %g1 ! add page size field
or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
ld [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, 0 ! use primary-ctx0 always?
move %xcc, USE_TSB_PRIMARY_CTX, %g4
cmp %g3, 1 ! use secondary-ctx0 always?
move %xcc, USE_TSB_SECONDARY_CTX, %g4
sllx %g4, TSB_CFG_USE_CTX1_SHIFT, %g4
or %g1, %g4, %g1 ! add any use-ctx0|ctx1 bits
or %g1, %g4, %g1 ! add valid bit
/* pick the TSB config register matching this TSB index */
move %xcc, TSB_CFG_CTX0_1, %g4
move %xcc, TSB_CFG_CTX0_2, %g4
move %xcc, TSB_CFG_CTX0_3, %g4
stxa %g1, [%g4]ASI_MMU_TSB ! program the HWTW zero-context TSB config
mulx %g7, STRAND_MRA_INCR, %g3 ! save z_tsb_cfg in strand.mra[0->3]
ba,pt %xcc, .ctx0_tsbd_loop
/* all TSBDs accepted: publish ntsbs and return success */
stx %o0, [%g5 + CPU_NTSBS_CTX0]
clr %o1 ! no return value
SET_SIZE(hcall_mmu_tsb_ctx0)
/*
 * hcall_mmu_tsb_ctxnon0 - register the guest's non-zero-context TSB
 * descriptor list; mirror of hcall_mmu_tsb_ctx0 using the CTXN state
 * and the TSB_CFG_CTXN_* config registers.
 *
 * NOTE(review): register usage suggests %o0 = ntsbs, %o1 = RA of the
 * TSBD array (sun4v MMU_TSB_CTXNON0 hcall) -- confirm against the API
 * headers.  Local labels (.ctxn_tsbd_loop, .ctxn_tsbd_finish, 2f,
 * herr_*) and several compares are defined outside this excerpt.
 */
ENTRY_NP(hcall_mmu_tsb_ctxnon0)
VCPU_GUEST_STRUCT(%g5, %g6) ! %g5 = vcpu struct, %g6 = guest struct
/* set cpu->ntsbs to zero now in case we error exit */
stx %g0, [%g5 + CPU_NTSBS_CTXN]
/* Also zero out H/W bases */
HVCALL(set_dummytsb_ctxN) ! park H/W on dummy TSBs while reconfiguring
/* TSBD array RA must be TSBD_ALIGNMENT-aligned */
btst TSBD_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
sllx %o0, TSBD_SHIFT, %g3 ! (delay slot) %g3 = ntsbs * TSBD_BYTES
/* verify the entire TSBD array lies within the guest's RA range */
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
add %g5, CPU_TSBDS_CTXN, %g2 ! %g2 = dest: vcpu's cached ctxN TSBDs
/* loop over each TSBD and validate */
add %g5, CPU_TSBDS_CTXN, %g2 ! %g2 = cursor into cached TSBDs
/* check pagesize - accept only valid encodings */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
bgeu,pn %xcc, herr_badpgsz ! cc set by a compare not visible in this excerpt
btst TTE_VALIDSIZEARRAY, %g3
/* check that pageszidx is set in pageszmask */
lduw [%g2 + TSBD_PGSZS_OFF], %g4
/* check that pageszidx is lowest-order bit of pageszmask */
/* check associativity - only support 1-way */
lduh [%g2 + TSBD_ASSOC_OFF], %g3
/* check TSB size: must be a power of two within the supported range */
lduw [%g2 + TSBD_SIZE_OFF], %g3
btst %g3, %g4 ! check for power-of-two
sll %g4, TSB_MAX_SZCODE, %g4
/* check context index field - must be -1 (shared) or zero/one */
lduw [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, TSBD_CTX_IDX_SHARE
be,pt %xcc, 2f ! -1 is OK
/* check reserved field - must be zero for now */
ldx [%g2 + TSBD_RSVD_OFF], %g3
/* check TSB base real address */
ldx [%g2 + TSBD_BASE_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g4
sllx %g4, TSBE_SHIFT, %g4 ! %g4 = TSB size in bytes (entries << TSBE_SHIFT)
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
add %g5, CPU_TSBDS_CTXN, %g2 ! reload cursor (%g2 clobbered by macro above)
/* range OK, check alignment */
bnz,pn %xcc, herr_badalign
/* now setup HWTW regs */
bgeu,pn %xcc, .ctxn_tsbd_finish ! exit loop once all TSBDs are programmed
add %g5, CPU_TSBDS_CTXN, %g2
sllx %g7, TSBD_SHIFT, %g1 ! %g1 = byte offset of TSBD index %g7
ldx [%g2 + TSBD_BASE_OFF], %g1
RA2PA_CONV(%g6, %g1, %g1, %g4) ! start with TSB base PA
lduw [%g2 + TSBD_SIZE_OFF], %g4
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
or %g1, %g4, %g1 ! add page size field
or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
ld [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, 0 ! use primary-ctxnon0 always?
move %xcc, USE_TSB_PRIMARY_CTX, %g4
cmp %g3, 1 ! use secondary-ctxnon0 always?
move %xcc, USE_TSB_SECONDARY_CTX, %g4
sllx %g4, TSB_CFG_USE_CTX1_SHIFT, %g4
or %g1, %g4, %g1 ! add any use-ctxnon0|ctx1 bits
or %g1, %g4, %g1 ! add valid bit
/* pick the TSB config register matching this TSB index */
move %xcc, TSB_CFG_CTXN_1, %g4
move %xcc, TSB_CFG_CTXN_2, %g4
move %xcc, TSB_CFG_CTXN_3, %g4
stxa %g1, [%g4]ASI_MMU_TSB ! program the HWTW nonzero-context TSB config
add %g7, 4, %g3 ! save nz_tsb_cfg in strand.mra[4->7]
mulx %g3, STRAND_MRA_INCR, %g3
ba,pt %xcc, .ctxn_tsbd_loop
/* all TSBDs accepted: publish ntsbs and return success */
stx %o0, [%g5 + CPU_NTSBS_CTXN]
clr %o1 ! no return value
SET_SIZE(hcall_mmu_tsb_ctxnon0)
/*
 * hcall_mmu_tsb_ctx0_info - copy the vcpu's current context-0 TSBD list
 * back into a guest-supplied buffer; ntsbs is returned in %o1.
 *
 * NOTE(review): %g4 is used below as the buffer RA -- presumably loaded
 * from a hcall arg before this point; the excerpt does not show it.
 */
ENTRY_NP(hcall_mmu_tsb_ctx0_info)
VCPU_GUEST_STRUCT(%g5, %g6) ! %g5 = vcpu struct, %g6 = guest struct
! actual ntsbs always returned in %o1, so save tsbs now
! Check to see if ntsbs fits into the supplied buffer
ldx [%g5 + CPU_NTSBS_CTX0], %o1
btst TSBD_ALIGNMENT - 1, %g4 ! buffer must be TSBD-aligned
bnz,pn %xcc, herr_badalign
sllx %o1, TSBD_SHIFT, %g3 ! (delay slot) %g3 = ntsbs * TSBD_BYTES
! %g3 size of tsbd in bytes
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
add %g5, CPU_TSBDS_CTX0, %g1 ! %g1 = source: cached ctx0 TSBDs
SET_SIZE(hcall_mmu_tsb_ctx0_info)
/*
 * hcall_mmu_tsb_ctxnon0_info - copy the vcpu's current non-zero-context
 * TSBD list into a guest buffer; ntsbs returned in %o1.  Mirror of
 * hcall_mmu_tsb_ctx0_info using the CTXN state.
 *
 * NOTE(review): %g4 is used as the buffer RA -- presumably loaded from a
 * hcall arg before this point; the excerpt does not show it.
 */
ENTRY_NP(hcall_mmu_tsb_ctxnon0_info)
VCPU_GUEST_STRUCT(%g5, %g6) ! %g5 = vcpu struct, %g6 = guest struct
! actual ntsbs always returned in %o1, so save tsbs now
! Check to see if ntsbs fits into the supplied buffer
ldx [%g5 + CPU_NTSBS_CTXN], %o1
btst TSBD_ALIGNMENT - 1, %g4 ! buffer must be TSBD-aligned
bnz,pn %xcc, herr_badalign
sllx %o1, TSBD_SHIFT, %g3 ! (delay slot) %g3 = ntsbs * TSBD_BYTES
! %g3 size of tsbd in bytes
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
add %g5, CPU_TSBDS_CTXN, %g1 ! %g1 = source: cached ctxN TSBDs
SET_SIZE(hcall_mmu_tsb_ctxnon0_info)
* mmu_map_addr - stuff ttes directly into the tlbs
/*
 * hcall_mmu_map_addr - translate a guest TTE's RA field to PA and stuff
 * the entry directly into the I/D TLBs (non-permanent mapping).
 *
 * Visible args: %o0 = va, %o1 = ctx (checked together), %o2 = tte,
 * %o3 = flags -- consistent with the sun4v MMU_MAP_ADDR hcall.
 * NOTE(review): labels 3f/4 and the I/O-mapping paths are only
 * partially visible in this excerpt.
 */
ENTRY_NP(hcall_mmu_map_addr)
VCPU_GUEST_STRUCT(%g1, %g6) ! %g1 = vcpu struct, %g6 = guest struct
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2) ! validate va/ctx or error
CHECK_MMU_FLAGS(%o3, herr_inval) ! validate I/D flag bits
TTE_SIZE(%o2, %g4, %g2, herr_badpgsz) ! %g4 = page size in bytes
sub %g4, 1, %g5 ! %g5 page mask
/* extract the 40-bit RA field (bits 53:13) from the TTE */
srlx %g2, 64 - 40 + 13, %g2
sllx %g2, 13, %g2 ! %g2 real address
xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
RA2PA_RANGE_CONV(%g6, %g2, %g4, 3f, %g5, %g7) ! RA -> PA, else try I/O range at 3f
4: or %g3, %g2, %g1 ! %g1 new tte with pa
or %o0, %o1, %g2 ! %g2 tag
mov MMU_TAG_ACCESS, %g3 ! %g3 tag_access
stxa %g1, [%g0]ASI_DTLB_DATA_IN ! insert into DTLB
! condition codes still set
stxa %g1, [%g0]ASI_ITLB_DATA_IN ! insert into ITLB
/* RA outside normal range: check the guest's I/O ranges */
RANGE_CHECK_IO(%g6, %g2, %g4, .hcall_mmu_map_addr_io_found,
.hcall_mmu_map_addr_io_not_found, %g1, %g5)
.hcall_mmu_map_addr_io_found:
.hcall_mmu_map_addr_io_not_found:
ALTENTRY(hcall_mmu_map_addr_ra_not_found)
SET_SIZE(hcall_mmu_map_addr)
/*
 * hcall_mmu_unmap_addr - demap one va/ctx translation from both TLBs.
 * Temporarily switches the primary context to the target ctx, issues
 * I/D demap-page operations, then restores the original contexts.
 *
 * Visible args: %o0 = va, %o1 = ctx, %o2 = flags.
 * NOTE(review): %g1/%g4 hold MMU-register offsets (primary ctx /
 * primary ctx1) loaded by code not visible in this excerpt.
 */
ENTRY_NP(hcall_mmu_unmap_addr)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2) ! validate va/ctx or error
CHECK_MMU_FLAGS(%o2, herr_inval) ! validate I/D flag bits
set (NCTXS - 1), %g2 ! 8K page mask
ldxa [%g1]ASI_MMU, %g3 ! save current primary ctx
ldxa [%g4]ASI_MMU, %g5 ! save current primary ctx1
stxa %o1, [%g1]ASI_MMU ! switch to new ctx
stxa %g0, [%g2]ASI_IMMU_DEMAP ! demap page from ITLB
stxa %g0, [%g2]ASI_DMMU_DEMAP ! demap page from DTLB
2: stxa %g3, [%g1]ASI_MMU ! restore original primary ctx
stxa %g5, [%g4]ASI_MMU ! restore original primary ctx1
SET_SIZE(hcall_mmu_unmap_addr)
* arg0/1 cpulist (%o0/%o1)
/*
 * hcall_mmu_demap_page - demap a single page for va/ctx on this cpu.
 * The optional cpulist (args %o0/%o1) is not supported: a non-zero
 * cpulist takes the herr_notsupported exit.
 *
 * Visible args: %o2 = va, %o3 = ctx, %o4 = flags.
 * NOTE(review): the compare feeding the first branch, and the
 * %g1/%g2/%g4 setup (ctx regs, demap address), are not visible here.
 */
ENTRY_NP(hcall_mmu_demap_page)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_VA_CTX(%o2, %o3, herr_inval, %g2) ! validate va/ctx or error
CHECK_MMU_FLAGS(%o4, herr_inval) ! validate I/D flag bits
ldxa [%g4]ASI_MMU, %g5 ! save primary ctx1
stxa %g0, [%g2]ASI_IMMU_DEMAP ! demap page from ITLB
stxa %g0, [%g2]ASI_DMMU_DEMAP ! demap page from DTLB
2: stxa %g3, [%g1]ASI_MMU ! restore primary ctx
stxa %g5, [%g4]ASI_MMU ! restore primary ctx1
SET_SIZE(hcall_mmu_demap_page)
* arg0/1 cpulist (%o0/%o1)
/*
 * hcall_mmu_demap_ctx - demap an entire context from both TLBs on this
 * cpu.  A non-zero cpulist (args %o0/%o1) is not supported.
 *
 * Visible args: %o2 = ctx, %o3 = flags.
 * NOTE(review): the compare feeding the first branch and the saving of
 * the primary ctx into %g7 are not visible in this excerpt.
 */
ENTRY_NP(hcall_mmu_demap_ctx)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_CTX(%o2, herr_inval, %g2) ! validate ctx or error
CHECK_MMU_FLAGS(%o3, herr_inval) ! validate I/D flag bits
set TLB_DEMAP_CTX_TYPE, %g3 ! demap-context operation encoding
ldxa [%g4]ASI_MMU, %g6 ! save current primary ctx1
stxa %g0, [%g3]ASI_IMMU_DEMAP ! demap ctx from ITLB
stxa %g0, [%g3]ASI_DMMU_DEMAP ! demap ctx from DTLB
2: stxa %g7, [%g2]ASI_MMU ! restore primary ctx
stxa %g6, [%g4]ASI_MMU ! restore primary ctx1
SET_SIZE(hcall_mmu_demap_ctx)
* arg0/1 cpulist (%o0/%o1)
/*
 * hcall_mmu_demap_all - demap all non-permanent translations from both
 * TLBs on this cpu.  A non-zero cpulist (args %o0/%o1) is not supported.
 *
 * Visible args: %o2 = flags.
 * NOTE(review): the compare feeding the first branch is not visible in
 * this excerpt.
 */
ENTRY_NP(hcall_mmu_demap_all)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_MMU_FLAGS(%o2, herr_inval) ! validate I/D flag bits
set TLB_DEMAP_ALL_TYPE, %g3 ! demap-all operation encoding
stxa %g0, [%g3]ASI_IMMU_DEMAP ! demap all from ITLB
stxa %g0, [%g3]ASI_DMMU_DEMAP ! demap all from DTLB
SET_SIZE(hcall_mmu_demap_all)
* arg1 context (%o1) must be zero
/*
 * hcall_mmu_map_perm_addr - install a permanent (pinned) mapping.
 * Validates the TTE, converts its RA to PA, records the mapping in the
 * guest's perm-mapping table under the table spinlock, inserts it into
 * this strand's I/D TLBs and marks the strand in the i/d cpusets.
 *
 * Visible args: %o0 = va, %o1 = ctx (header says must be zero),
 * %o2 = tte, %o3 = flags.
 * NOTE(review): this excerpt omits several compares, branch delay
 * slots and the bare `*' lines are fragments of original block
 * comments whose delimiters were lost; labels .pmap_* are defined
 * outside the visible text.
 */
ENTRY_NP(hcall_mmu_map_perm_addr)
VCPU_GUEST_STRUCT(%g1, %g6) ! %g1 = vcpu struct, %g6 = guest struct
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2) ! validate va/ctx or error
CHECK_MMU_FLAGS(%o3, herr_inval) ! validate I/D flag bits
! Fail if tte isn't valid
TTE_SIZE(%o2, %g4, %g2, herr_badpgsz) ! %g4 = page size in bytes
sub %g4, 1, %g5 ! %g5 page mask
! Fail if page-offset bits aren't zero
/* extract the 40-bit RA field (bits 53:13) from the TTE */
srlx %g2, 64 - 40 + 13, %g2
sllx %g2, 13, %g2 ! %g2 real address
xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g2, %g4, herr_noraddr, %g5, %g7)
or %g3, %g7, %g2 ! %g2 new tte with pa
* OBP & Solaris assume demap semantics. Whack the TLBs to remove
* overlapping (multi-hit trap producing) entries. Note this isn't
* strictly necessary for incoming 8KB entries as auto-demap would
set (TLB_DEMAP_CTX_TYPE | TLB_DEMAP_NUCLEUS), %g1
stxa %g0, [%g1]ASI_IMMU_DEMAP ! demap nucleus ctx from ITLB
stxa %g0, [%g1]ASI_DMMU_DEMAP ! demap nucleus ctx from DTLB
/* take the guest's perm-mapping table lock */
add %g6, GUEST_PERM_MAPPINGS_LOCK, %g1
SPINLOCK_ENTER(%g1, %g3, %g4)
/* Search for existing perm mapping */
add %g6, GUEST_PERM_MAPPINGS, %g1
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3 ! start at last table entry
* Save the first uninitialised or invalid entry (TTE_V == 0)
* for the permanent mapping. Loop through all entries checking
* for an existing matching entry.
* for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
* if (!table[i] || !table[i]->tte.v) {
* saved_entry = &table[i]; // free entry
* if (table[i]->va == va) {
* saved_entry = &table[i]; // matching entry
! %g1 = permanent mapping table base address
! %g3 = current offset into table
! %g4 = last free entry / saved_entry
ldx [%g5 + MAPPING_TTE], %g6
* if (saved_entry == 0) {
* // (first invalid/uninitialised entry)
* saved_entry = current_entry;
brgez,a,pt %g6, .pmap_continue ! TTE_V clear (bit 63) => free entry
* saved_entry = current_entry;
* NB: overlapping mappings not detected, behavior
* is undefined right now. The hardware will demap
* when we insert and a TLB error later could reinstall
* both in some order where the end result is different
* than the post-map-perm result.
ldx [%g5 + MAPPING_VA], %g6
be,a,pt %xcc, .pmap_break ! matching va found
deccc GUEST_PERM_MAPPINGS_INCR, %g3 ! step to previous entry
* if (saved_entry == NULL)
brz,a,pn %g4, .pmap_return ! table full, no slot found
* if (saved_entry->tte.v)
* existing entry to modify
ldx [%g4 + MAPPING_TTE], %g5
brgez,pn %g5, .pmap_free_entry ! entry invalid => treat as free
* Compare new tte with existing tte
bne,a,pn %xcc, .pmap_return ! mismatched tte for same va => error
ldub [%g1 + CPU_VID], %g1 ! %g1 = this strand's virtual cpu id
* if (saved_entry->icpuset & (1 << curcpu))
ldx [%g4 + MAPPING_ICPUSET], %g5
bnz,a,pn %xcc, .pmap_return ! already mapped in ITLB on this cpu
* if (saved_entry->dcpuset & (1 << curcpu))
ldx [%g4 + MAPPING_DCPUSET], %g5
bnz,a,pn %xcc, .pmap_return ! already mapped in DTLB on this cpu
/* record the new permanent mapping */
stx %o0, [%g4 + MAPPING_VA]
stx %g2, [%g4 + MAPPING_TTE]
ldub [%g1 + CPU_VID], %g3 ! %g3 = this strand's virtual cpu id
* If no other strands on this core have this mapping then map
* if (((m->icpuset >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0 &&
* ((m->dcpuset >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0) {
ldx [%g4 + MAPPING_ICPUSET], %g5
ldx [%g4 + MAPPING_DCPUSET], %g3
sllx %g6, CPUID_2_COREID_SHIFT, %g6 ! %g6 * NSTRANDSPERCORE
stxa %g2, [%g0]ASI_ITLB_DATA_IN ! pin into ITLB
stxa %g2, [%g0]ASI_DTLB_DATA_IN ! pin into DTLB
* m->icpuset |= (1 << CPU->pid);
ldx [%g4 + MAPPING_ICPUSET], %g5
stx %g5, [%g4 + MAPPING_ICPUSET]
* m->dcpuset |= (1 << CPU->pid);
ldx [%g4 + MAPPING_DCPUSET], %g5
stx %g5, [%g4 + MAPPING_DCPUSET]
inc GUEST_PERM_MAPPINGS_LOCK, %g1 ! %g1 back to lock addr for SPINLOCK_EXIT
SET_SIZE(hcall_mmu_map_perm_addr)
/*
 * hcall_mmu_unmap_perm_addr - remove a permanent mapping for this
 * strand.  Finds the va in the guest's perm-mapping table (under the
 * table lock), clears this cpu from the i/d cpusets, demaps the page
 * when no other strand on the core still uses it, and invalidates the
 * table entry once both cpusets are empty.
 *
 * Visible args: %o0 = va, %o1 = ctx, %o2 = flags.
 * NOTE(review): compares, delay slots and the .punmap_* labels are
 * partially outside this excerpt; bare `*' lines are fragments of
 * original block comments whose delimiters were lost.
 */
ENTRY_NP(hcall_mmu_unmap_perm_addr)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2) ! validate va/ctx or error
CHECK_MMU_FLAGS(%o2, herr_inval) ! validate I/D flag bits
* Search for existing perm mapping
add %g6, GUEST_PERM_MAPPINGS, %g1
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3 ! start at last table entry
add %g6, GUEST_PERM_MAPPINGS_LOCK, %g2
SPINLOCK_ENTER(%g2, %g5, %g6) ! take the perm-mapping table lock
* for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
* if (table[i]->va == va)
! %g1 = permanent mapping table base address
! %g3 = current offset into table
! %g4 = last free entry / saved_entry
ldx [%g5 + MAPPING_TTE], %g6
brgez,pt %g6, .punmap_continue ! TTE_V clear => skip invalid entry
ldx [%g5 + MAPPING_VA], %g6
be,pt %xcc, .punmap_break ! matching va found
deccc GUEST_PERM_MAPPINGS_INCR, %g3 ! step to previous entry
bgeu,pt %xcc, .punmap_loop
! %g5 = entry in mapping table
brlz,a,pn %g3, .punmap_return ! va not found in table
ldub [%g1 + CPU_VID], %g3 ! %g3 = this strand's virtual cpu id
! %g5 = entry in mapping table
* m->cpuset_i &= ~(1 << curcpu);
ldx [%g5 + MAPPING_ICPUSET], %g2
stx %g2, [%g5 + MAPPING_ICPUSET]
* m->cpuset_d &= ~(1 << curcpu);
ldx [%g5 + MAPPING_DCPUSET], %g2
stx %g2, [%g5 + MAPPING_DCPUSET]
* If no other strands on this core still use this mapping
* then demap it in both TLBs.
* if (((m->cpuset_i >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0 &&
* ((m->cpuset_d >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0) {
ldx [%g5 + MAPPING_ICPUSET], %g4
ldx [%g5 + MAPPING_DCPUSET], %g3
sllx %g6, CPUID_2_COREID_SHIFT, %g6 ! %g6 * NSTRANDSPERCORE
/* demap the page under the mapping's ctx, then restore contexts */
ldxa [%g1]ASI_MMU, %g3 ! save current primary ctx
ldxa [%g4]ASI_MMU, %g6 ! save current primary ctx1
stxa %o1, [%g1]ASI_MMU ! switch to new ctx
stxa %g0, [%o0]ASI_IMMU_DEMAP ! demap page from ITLB
stxa %g0, [%o0]ASI_DMMU_DEMAP ! demap page from DTLB
stxa %g3, [%g1]ASI_MMU ! restore original primary ctx
stxa %g6, [%g4]ASI_MMU ! restore original primary ctx1
* if (m->cpuset_d == 0 && m->cpuset_i == 0) {
ldx [%g5 + MAPPING_DCPUSET], %g1
ldx [%g5 + MAPPING_ICPUSET], %g2
/* no users left anywhere: retire the table entry */
stx %g0, [%g5 + MAPPING_VA]
ldx [%g5 + MAPPING_TTE], %g1
stx %g1, [%g5 + MAPPING_TTE]
inc GUEST_PERM_MAPPINGS_LOCK, %g1 ! %g1 back to lock addr for SPINLOCK_EXIT
SET_SIZE(hcall_mmu_unmap_perm_addr)
/*
 * hcall_mmu_perm_addr_info - dump the guest's valid permanent mappings
 * into a guest-supplied buffer as PERMMAPINFO records (va, ctx, tte,
 * flags), holding the perm-mapping table lock while scanning.
 *
 * Visible args: %o0 = buffer RA, %o1 = max entries (becomes the count
 * of valid entries written); %g7 = guest struct, presumably loaded by
 * code not visible in this excerpt.
 */
ENTRY_NP(hcall_mmu_perm_addr_info)
! Check to see if table fits into the supplied buffer
bnz,pn %xcc, herr_badalign ! cc set by an alignment test not visible here
mulx %o1, PERMMAPINFO_BYTES, %g3 ! (delay slot) buffer size in bytes
! %g3 size of permmap table in bytes
RA2PA_RANGE_CONV_UNK_SIZE(%g7, %o0, %g3, herr_noraddr, %g5, %g2)
add %g7, GUEST_PERM_MAPPINGS_LOCK, %g1
SPINLOCK_ENTER(%g1, %g3, %g4) ! take the perm-mapping table lock
* Search for valid perm mappings
add %g7, GUEST_PERM_MAPPINGS, %g1
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3 ! start at last table entry
! %o1 = count of valid entries
! %g1 = base of mapping table
! %g2 = pa of guest's buffer
! %g3 = current offset into table
! %g4 = current entry in table
ldx [%g4 + MAPPING_TTE], %g5
brgez,pn %g5, .perm_info_continue ! TTE_V clear => skip invalid entry
/* Found a valid mapping */
ldx [%g4 + MAPPING_VA], %g5
stx %g5, [%g2 + PERMMAPINFO_VA] ! record va
stx %g0, [%g2 + PERMMAPINFO_CTX] ! ctx is always zero for perm mappings
ldx [%g4 + MAPPING_TTE], %g5
stx %g5, [%g2 + PERMMAPINFO_TTE] ! record tte
ldub [%g5 + CPU_VID], %g5 ! this strand's virtual cpu id
ldx [%g4 + MAPPING_ICPUSET], %g5
0: ldx [%g4 + MAPPING_DCPUSET], %g5
0: stx %g6, [%g4 + PERMMAPINFO_FLAGS] ! record I/D flags
inc PERMMAPINFO_BYTES, %g2 ! advance output record pointer
deccc GUEST_PERM_MAPPINGS_INCR, %g3 ! step to previous table entry
bgeu,pt %xcc, .perm_info_loop
inc GUEST_PERM_MAPPINGS_LOCK, %g1 ! %g1 back to lock addr for SPINLOCK_EXIT
SET_SIZE(hcall_mmu_perm_addr_info)