* ========== Copyright Header Begin ==========================================
* OpenSPARC T2 Processor File: hcall.s
* Copyright (C) 1995-2007 Sun Microsystems, Inc. All Rights Reserved
* 4150 Network Circle, Santa Clara, California 95054, U.S.A.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* For the avoidance of doubt, and except that if any non-GPL license
* choice is available it will apply instead, Sun elects to use only
* the General Public License version 2 (GPLv2) at this time for any
* software where a choice of GPL license versions is made
* available with the language indicating that GPLv2 or any later version
* may be used, or where a choice of which version of the GPL is applied is
* otherwise directed by Sun (as, for example, in the installer).
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
* ========== Copyright Header End ============================================
#define HCALL_TRAP_BASE_PA 0x2000
#define HCALL_TRAP_DATA_PA 0x4000
#define ASI_PRIMARY_LITTLE 0x88
SECTION .HCALL_TRAPS TEXT_VA=HCALL_TRAP_BASE_PA, DATA_VA=HCALL_TRAP_DATA_PA
.ident "@(#)hcall.s 1.72 05/04/29 SMI"
#include <sys/asm_linkage.h>
#include <sparcv9/misc.h>
#include <niagara/jbi_regs.h>
#include <niagara/dram.h>
#include <devices/pc16550.h>
#ifdef CONFIG_BRINGUP
#define VDEV_GENINTR 0x280 /* for testing */
#endif /* CONFIG_BRINGUP */
* hcall_core - entry point for the core hcalls: versioning, plus
* aliases for the standard APIs that must still be callable when
* there is a version mismatch.
* hcall - entry point for FAST_TRAP hcalls
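*
* Calling convention (per the sun4v hypervisor API): the guest traps
* in with the function number in %o5 and arguments in %o0-%o4; the
* handler returns a status in %o0 and return values in %o1-%o4.
* The dispatch below compares %o5 against each function number and
* branches to the matching handler. A guest-side call is roughly
* (sketch only, not part of this file; FAST_TRAP is the sun4v
* fast-trap number):
*	mov	FUNCTION, %o5
*	ta	FAST_TRAP
*	brnz,pn	%o0, error	! %o0 = status, %o1.. = results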
be,pn %xcc, hcall_mmu_demap_page
be,pn %xcc, hcall_mmu_demap_ctx
be,pn %xcc, hcall_mmu_demap_all
be,pn %xcc, hcall_cpu_mondo_send
cmp %o5, IO_PEEK ! io peek, suppress errors
be,pn %xcc, hcall_io_peek
cmp %o5, IO_POKE ! io poke, suppress errors
be,pn %xcc, hcall_io_poke
be,pn %xcc, hcall_cons_putchar
be,pn %xcc, hcall_cons_getchar
be,pn %xcc, hcall_tod_get
be,pn %xcc, hcall_tod_set
be,pn %xcc, hcall_mmu_tsb_ctx0
be,pn %xcc, hcall_mmu_tsb_ctxnon0
cmp %o5, MMU_MAP_PERM_ADDR
be,pn %xcc, hcall_mmu_map_perm_addr
cmp %o5, MMU_UNMAP_PERM_ADDR
be,pn %xcc, hcall_mmu_unmap_perm_addr
cmp %o5, MMU_FAULT_AREA_CONF
be,pn %xcc, hcall_mmu_fault_area_conf
be,pn %xcc, hcall_mem_scrub
be,pn %xcc, hcall_mem_sync
#if defined(NVRAM_READ) && defined(NVRAM_WRITE)
be,pn %xcc, hcall_nvram_read
be,pn %xcc, hcall_nvram_write
be,pn %xcc, hcall_svc_send
be,pn %xcc, hcall_svc_recv
be,pn %xcc, hcall_svc_getstatus
be,pn %xcc, hcall_svc_setstatus
be,pn %xcc, hcall_svc_clrstatus
be,pn %xcc, hcall_cpu_start
be,pn %xcc, hcall_cpu_stop
be,pn %xcc, hcall_cpu_state
be,pn %xcc, hcall_cpu_yield
be,pn %xcc, hcall_mach_sir
be,pn %xcc, hcall_mach_exit
be,pn %xcc, hcall_cpu_myid
be,pn %xcc, hcall_mmu_enable
cmp %o5, MMU_TSB_CTX0_INFO
be,pn %xcc, hcall_mmu_tsb_ctx0_info
cmp %o5, MMU_TSB_CTXNON0_INFO
be,pn %xcc, hcall_mmu_tsb_ctxnon0_info
cmp %o5, NIAGARA_GET_PERFREG
be,pn %xcc, hcall_niagara_getperf
cmp %o5, NIAGARA_SET_PERFREG
be,pn %xcc, hcall_niagara_setperf
be,pn %xcc, hcall_mach_desc
be,pn %xcc, hcall_dump_buf_info
be,pn %xcc, hcall_dump_buf_update
cmp %o5, INTR_DEVINO2SYSINO
be,pn %xcc, hcall_intr_devino2sysino
be,pn %xcc, hcall_intr_getenabled
be,pn %xcc, hcall_intr_setenabled
be,pn %xcc, hcall_intr_getstate
be,pn %xcc, hcall_intr_setstate
be,pn %xcc, hcall_intr_gettarget
be,pn %xcc, hcall_intr_settarget
be,pn %xcc, hcall_vpci_iommu_map
cmp %o5, VPCI_IOMMU_UNMAP
be,pn %xcc, hcall_vpci_iommu_unmap
cmp %o5, VPCI_IOMMU_GETMAP
be,pn %xcc, hcall_vpci_iommu_getmap
cmp %o5, VPCI_IOMMU_GETBYPASS
be,pn %xcc, hcall_vpci_iommu_getbypass
be,pn %xcc, hcall_vpci_config_get
be,pn %xcc, hcall_vpci_config_put
be,pn %xcc, hcall_vpci_io_peek
be,pn %xcc, hcall_vpci_io_poke
be,pn %xcc, hcall_vpci_dma_sync
be,pn %xcc, hcall_msiq_conf
be,pn %xcc, hcall_msiq_info
be,pn %xcc, hcall_msiq_getvalid
be,pn %xcc, hcall_msiq_setvalid
be,pn %xcc, hcall_msiq_getstate
be,pn %xcc, hcall_msiq_setstate
be,pn %xcc, hcall_msiq_gethead
be,pn %xcc, hcall_msiq_sethead
be,pn %xcc, hcall_msiq_gettail
be,pn %xcc, hcall_msi_getvalid
be,pn %xcc, hcall_msi_setvalid
be,pn %xcc, hcall_msi_getstate
be,pn %xcc, hcall_msi_setstate
be,pn %xcc, hcall_msi_getmsiq
be,pn %xcc, hcall_msi_setmsiq
cmp %o5, MSI_MSG_GETVALID
be,pn %xcc, hcall_msi_msg_getvalid
cmp %o5, MSI_MSG_SETVALID
be,pn %xcc, hcall_msi_msg_setvalid
be,pn %xcc, hcall_msi_msg_getmsiq
be,pn %xcc, hcall_msi_msg_setmsiq
be,pn %xcc, hcall_disk_read
be,pn %xcc, hcall_disk_write
be,pn %xcc, hcall_ncs_request
be,pn %xcc, hcall_mmu_stat_area
be,pn %xcc, hcall_ttrace_buf_conf
be,pn %xcc, hcall_ttrace_buf_info
be,pn %xcc, hcall_ttrace_enable
be,pn %xcc, hcall_ttrace_freeze
cmp %o5, MMU_FAULT_AREA_INFO
be,pn %xcc, hcall_mmu_fault_area_info
be,pn %xcc, hcall_get_rtba
be,pn %xcc, hcall_set_rtba
be,pn %xcc, hcall_vdev_genintr
* Common error escapes so errors can be implemented by a branch to
* one of these shared labels.
SET_SIZE(herr_wouldblock)
SET_SIZE(herr_notsupported)
#ifdef CONFIG_IO_PEEK_POKE
* Function: hcall_io_peek(void *ioaddr, int bw)
* %o5 - hcall function number
* (1 = byte, 2 = halfword, 4 = word, 8 = extword)
* %o0 - EOK (for success), EINVAL or EIO (for failure)
* %o1 - i/o data on successful read
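*
* In outline: set cpu.io_prot so the error handlers suppress and
* latch any access error, perform the access, then test
* cpu.io_error; a nonzero value is cleared and converted to EIO,
* otherwise the data is returned in %o1.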
stx %g2, [%g1 + %g3] ! cpu.io_prot = 1
! clear io_prot, return EINVAL
stx %g0, [%g1 + %g3] ! cpu.io_prot = 0
! %g1 has the cpu pointer
* check io_error flag, which will be nonzero if an uncorrectable
* error (UE) occurred
* %g1 has this_cpu, %g2 has read return data
* %g3 is cpu.io_prot offset
ldx [%g1 + %g4], %g5 ! cpu.io_error
stx %g0, [%g1 + %g3] ! cpu.io_prot = 0
mov %g2, %o1 ! return data in %o1
! i/o error, clear io_error flag
stx %g0, [%g1 + %g4] ! cpu.io_error = 0
* Function: hcall_io_poke(void *addr, uint64_t data, int size)
* %o5 - hcall function number
* (1 = byte, 2 = halfword, 4 = word, 8 = extword)
* %o0 - EOK (on success), EINVAL or EIO (on failure)
stx %g2, [%g1 + %g3] ! cpu.io_prot = 1
! clear io_prot, return EINVAL
stx %g0, [%g1 + %g3] ! cpu.io_prot = 0
! %g1 has the cpu pointer
* check io_error flag which will be nonzero if a UE occurred
* %g1 has this_cpu, %g2 has read return data
* %g3 is cpu.io_prot offset
stx %g0, [%g1 + %g3] ! cpu.io_prot = 0
! i/o error, clear io_error flag
stx %g0, [%g1 + %g4] ! cpu.io_error = 0
ENTRY_NP(hcall_mach_exit)
* - quiesce all other cpus in guest
* - go back to start so boot cpu (maybe not this cpu)
* can reboot the guest or wait for further instructions
ba,pt %xcc, vbsc_guest_exit
SET_SIZE(hcall_mach_exit)
* - quiesce all other cpus in guest
* - go back to start so boot cpu (maybe not this cpu)
* can reboot the guest or wait for further instructions
ba,pt %xcc, vbsc_guest_sir
* ret1 actual len (%o1) (for EOK or EINVAL)
* guest uses this sequence to get the machine description: first
* call with a zero-length buffer, expecting EINVAL plus the required
* size in %o1 (if %o0 != EINVAL, the probe failed), then call again
* with a buffer of that size; the EINVAL case is therefore the
* first error check below
ENTRY_NP(hcall_mach_desc)
CPU_GUEST_STRUCT(%g1, %g6)
! paranoia for xcopy - should already be a 16-byte multiple
add %g3, MACH_DESC_ALIGNMENT - 1, %g3
andn %g3, MACH_DESC_ALIGNMENT - 1, %g3
mov %g3, %o1 ! return PD size for success or EINVAL
btst MACH_DESC_ALIGNMENT - 1, %o0
bnz,pn %xcc, herr_badalign
.empty /* RANGE_CHECK may start in a delay slot */
RANGE_CHECK(%g6, %o0, %g3, herr_noraddr, %g4)
REAL_OFFSET(%g6, %o0, %g4, %g5)
! %g4 = pa of guest buffer
/* xcopy(pd, buf[%o0], size[%g3]) */
! %o1 was set above to the guest's PD size
SET_SIZE(hcall_mach_desc)
* tod_get - Time-of-day get
CPU2ROOT_STRUCT(%g1, %g2)
CPU2GUEST_STRUCT(%g1, %g1)
ldx [%g1 + GUEST_TOD_OFFSET], %g3
ldx [%g2 + CONFIG_TOD], %g4
ldx [%g2 + CONFIG_TODFREQUENCY], %g5
!! %g3 guest's tod offset
! If the PD says no TOD then start with 0
brz,pn %g4, herr_notsupported
clr %o1 ! In case error status not checked
udivx %o1, %g5, %o1 ! Convert to seconds
add %o1, %g3, %o1 ! Add partition's tod offset
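! Net effect: ret1 = hardware TOD counter / TOD frequency, plus the
! per-guest offset, i.e. this guest's notion of seconds since the
! epoch.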
* tod_set - Time-of-day set
CPU2ROOT_STRUCT(%g1, %g2)
CPU2GUEST_STRUCT(%g1, %g1)
ldx [%g1 + GUEST_TOD_OFFSET], %g3
ldx [%g2 + CONFIG_TOD], %g4
ldx [%g2 + CONFIG_TODFREQUENCY], %g5
!! %g3 guest's tod offset
* If no hardware TOD then tod-get returned 0 the first time
* and will continue to do so.
brz,pn %g4, herr_notsupported
ldx [%g4], %g6 ! %g6 = system tod
udivx %g6, %g5, %g6 ! Convert to seconds
sub %o0, %g6, %g6 ! %g6 = new delta
stx %g6, [%g1 + GUEST_TOD_OFFSET]
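! Note: tod_set never writes the hardware counter; it records
! (requested time - current hardware seconds) as a per-guest delta,
! so each guest keeps an independent notion of time-of-day.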
* arg1 return address (%o1)
ENTRY_NP(hcall_mmu_enable)
* Check requested return address for instruction alignment
btst (INSTRUCTION_ALIGNMENT - 1), %o1
bnz,pn %xcc, herr_badalign
set (LSUCR_DM | LSUCR_IM), %g2
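!! LSUCR_DM and LSUCR_IM are the D-MMU and I-MMU enable bits of the
!! LSU control register; both are set or cleared together so data
!! and instruction translation switch as a pair.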
!! %g1 = current lsucr value
brz,pn %o0, 1f ! enable or disable?
btst %g1, %g2 ! ccr indicates current status
* The return address will be virtual, so we cannot check
* its range; the alignment has already been checked.
bnz,pn %xcc, herr_inval ! it's already enabled
or %g1, %g2, %g1 ! enable MMU
* The return address is a real address, so we check
* its range; the alignment has already been checked.
bz,pn %xcc, herr_inval ! it's already disabled
andn %g1, %g2, %g1 ! disable MMU
RANGE_CHECK(%g3, %o1, INSTRUCTION_SIZE, herr_noraddr, %g4)
SET_SIZE(hcall_mmu_enable)
ENTRY_NP(hcall_mmu_fault_area_conf)
btst (MMU_FAULT_AREA_ALIGNMENT - 1), %o0 ! check alignment
bnz,pn %xcc, herr_badalign
CPU_GUEST_STRUCT(%g1, %g4)
RANGE_CHECK(%g4, %o0, MMU_FAULT_AREA_SIZE, herr_noraddr, %g3)
REAL_OFFSET(%g4, %o0, %g2, %g3)
ldx [%g1 + CPU_MMU_AREA_RA], %o1
stx %o0, [%g1 + CPU_MMU_AREA_RA]
stx %g2, [%g1 + CPU_MMU_AREA]
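! Both values are cached: the guest's RA (CPU_MMU_AREA_RA) is what
! fault_area_info hands back, while the translated PA (CPU_MMU_AREA)
! lets trap handlers store fault information without retranslating.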
SET_SIZE(hcall_mmu_fault_area_conf)
* ret1 fault area raddr (%o1)
ENTRY_NP(hcall_mmu_fault_area_info)
ldx [%g1 + CPU_MMU_AREA_RA], %o1
SET_SIZE(hcall_mmu_fault_area_info)
ENTRY_NP(hcall_mmu_tsb_ctx0)
CPU_GUEST_STRUCT(%g5, %g6)
INC_MMU_STAT(%g5, MMUSTAT_SET0, %g2, %g3)
/* set cpu->ntsbs to zero now in case we error exit */
stx %g0, [%g5 + CPU_NTSBS_CTX0]
/* Also zero out H/W bases */
btst TSBD_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
sllx %o0, TSBD_SHIFT, %g3
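!! %g3 = ntsbs * TSBD_BYTES (TSBD_SHIFT being log2 of the descriptor
!! size), used both for the range check and for the copy below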
RANGE_CHECK(%g6, %o1, %g3, herr_noraddr, %g2)
/* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
REAL_OFFSET(%g6, %o1, %g1, %g2)
add %g5, CPU_TSBDS_CTX0, %g2
/* loop over each TSBD and validate */
add %g5, CPU_TSBDS_CTX0, %g2
/* check pagesize - accept any size encoding? XXX */
/* XXX pageszidx is lowest-order bit of pageszmask */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
bgeu,pn %xcc, herr_badpgsz
/* check associativity - only support 1-way */
lduh [%g2 + TSBD_ASSOC_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g3
sll %g4, TSB_MAX_SZCODE, %g4
/* check context index field - must be -1 (shared) or zero */
ld [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, TSBD_CTX_IDX_SHARE
brnz,pn %g3, herr_inval ! only one set of context regs
/* check reserved field - must be zero for now */
ldx [%g2 + TSBD_RSVD_OFF], %g3
/* check TSB base real address */
ldx [%g2 + TSBD_BASE_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g4
sllx %g4, TSBE_SHIFT, %g4
RANGE_CHECK(%g6, %g3, %g4, herr_noraddr, %g7)
/* range OK, check alignment */
bnz,pn %xcc, herr_badalign
/* now setup HWTW regs */
/* XXX - only look at first two TSBDs for now */
/* XXX - setup use_context if TSBD context not shared or zero */
add %g5, CPU_TSBDS_CTX0, %g2
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
or %g1, %g4, %g1 ! add page size field
or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
or %g1, %g4, %g1 ! add valid bit
stxa %g1, [%g4]ASI_MMU_TSB
/* process second TSBD, if available */
add %g2, TSBD_BYTES, %g2 ! move to next TSBD
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
or %g1, %g4, %g1 ! add page size field
or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
or %g1, %g4, %g1 ! add valid bit
stxa %g1, [%g4]ASI_MMU_TSB
stx %o0, [%g5 + CPU_NTSBS_CTX0]
/* now setup H/W TSB regs */
/* only look at first two TSBDs for now */
add %g5, CPU_TSBDS_CTX0, %g2
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS0
stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS0
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 only)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 only)
/* process second TSBD, if available */
add %g2, TSBD_BYTES, %g2 ! move to next TSBD
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS1
stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS1
/* %g3 still has old CONFIG value. */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 + PS1)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 + PS1)
stx %o0, [%g5 + CPU_NTSBS_CTX0]
clr %o1 ! no return value
SET_SIZE(hcall_mmu_tsb_ctx0)
ENTRY_NP(hcall_mmu_tsb_ctxnon0)
CPU_GUEST_STRUCT(%g5, %g6)
INC_MMU_STAT(%g5, MMUSTAT_SETN0, %g2, %g3)
/* set cpu->ntsbs to zero now in case we error exit */
stx %g0, [%g5 + CPU_NTSBS_CTXN]
/* Also zero out H/W bases */
btst TSBD_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
sllx %o0, TSBD_SHIFT, %g3
RANGE_CHECK(%g6, %o1, %g3, herr_noraddr, %g2)
/* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
REAL_OFFSET(%g6, %o1, %g1, %g2)
add %g5, CPU_TSBDS_CTXN, %g2
/* loop over each TSBD and validate */
add %g5, CPU_TSBDS_CTXN, %g2
/* check pagesize - accept any size encoding? XXX */
/* XXX pageszidx is lowest-order bit of pageszmask */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
bgeu,pn %xcc, herr_badpgsz
/* check associativity - only support 1-way */
lduh [%g2 + TSBD_ASSOC_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g3
sll %g4, TSB_MAX_SZCODE, %g4
/* check context index field - must be -1 (shared) or zero */
ld [%g2 + TSBD_CTX_INDEX], %g3
cmp %g3, TSBD_CTX_IDX_SHARE
brnz,pn %g3, herr_inval ! only one set of context regs
/* check reserved field - must be zero for now */
ldx [%g2 + TSBD_RSVD_OFF], %g3
/* check TSB base real address */
ldx [%g2 + TSBD_BASE_OFF], %g3
ld [%g2 + TSBD_SIZE_OFF], %g4
sllx %g4, TSBE_SHIFT, %g4
RANGE_CHECK(%g6, %g3, %g4, herr_noraddr, %g7)
/* range OK, check alignment */
bnz,pn %xcc, herr_badalign
/* now setup HWTW regs */
/* XXX - only look at first two TSBDs for now */
/* XXX - setup use_context if TSBD context not shared or zero */
add %g5, CPU_TSBDS_CTXN, %g2
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
or %g1, %g4, %g1 ! add page size field
or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
or %g1, %g4, %g1 ! add valid bit
stxa %g1, [%g4]ASI_MMU_TSB
/* process second TSBD, if available */
add %g2, TSBD_BYTES, %g2 ! move to next TSBD
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
or %g1, %g4, %g1 ! add page size field
or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
or %g1, %g4, %g1 ! add valid bit
stxa %g1, [%g4]ASI_MMU_TSB
stx %o0, [%g5 + CPU_NTSBS_CTXN]
/* now setup H/W TSB regs */
/* only look at first two TSBDs for now */
add %g5, CPU_TSBDS_CTXN, %g2
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS0
stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS0
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 only)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 only)
/* process second TSBD, if available */
add %g2, TSBD_BYTES, %g2 ! move to next TSBD
ldx [%g2 + TSBD_BASE_OFF], %g1
REAL_OFFSET(%g6, %g1, %g1, %g4)
ld [%g2 + TSBD_SIZE_OFF], %g4
srl %g4, TSB_SZ0_SHIFT, %g4
add %g1, 1, %g1 ! increment TSB size field
stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS1
stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS1
/* %g3 still has old CONFIG value. */
lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 + PS1)
stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 + PS1)
stx %o0, [%g5 + CPU_NTSBS_CTXN]
clr %o1 ! no return value
SET_SIZE(hcall_mmu_tsb_ctxnon0)
ENTRY_NP(hcall_mmu_tsb_ctx0_info)
CPU_GUEST_STRUCT(%g5, %g6)
! actual ntsbs is always returned in %o1, so fetch it now
! Check to see if ntsbs fits into the supplied buffer
ldx [%g5 + CPU_NTSBS_CTX0], %o1
btst TSBD_ALIGNMENT - 1, %g4
bnz,pn %xcc, herr_badalign
sllx %o1, TSBD_SHIFT, %g3
!! %g3 size of tsbd in bytes
RANGE_CHECK(%g6, %g4, %g3, herr_noraddr, %g2)
REAL_OFFSET(%g6, %g4, %g2, %g1)
!! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
add %g5, CPU_TSBDS_CTX0, %g1
SET_SIZE(hcall_mmu_tsb_ctx0_info)
ENTRY_NP(hcall_mmu_tsb_ctxnon0_info)
CPU_GUEST_STRUCT(%g5, %g6)
! actual ntsbs is always returned in %o1, so fetch it now
! Check to see if ntsbs fits into the supplied buffer
ldx [%g5 + CPU_NTSBS_CTXN], %o1
btst TSBD_ALIGNMENT - 1, %g4
bnz,pn %xcc, herr_badalign
sllx %o1, TSBD_SHIFT, %g3
!! %g3 size of tsbd in bytes
RANGE_CHECK(%g6, %g4, %g3, herr_noraddr, %g2)
REAL_OFFSET(%g6, %g4, %g2, %g1)
!! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
add %g5, CPU_TSBDS_CTXN, %g1
SET_SIZE(hcall_mmu_tsb_ctxnon0_info)
* arg1 size (%o1) (bytes)
ENTRY_NP(hcall_mmu_stat_area)
btst (MMU_STAT_ALIGNMENT - 1), %o0 ! check alignment
bnz,pn %xcc, herr_badalign
CPU_GUEST_STRUCT(%g1, %g4)
RANGE_CHECK(%g4, %o0, %o1, herr_noraddr, %g3)
REAL_OFFSET(%g4, %o0, %g2, %g3)
ldx [%g1 + CPU_MMU_STATS_RA], %o1
stx %o0, [%g1 + CPU_MMU_STATS_RA]
stx %g2, [%g1 + CPU_MMU_STATS]
SET_SIZE(hcall_mmu_stat_area)
* mmu_map_addr - stuff ttes directly into the tlbs
ENTRY_NP(hcall_mmu_map_addr)
CPU_GUEST_STRUCT(%g1, %g6)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o3, herr_inval)
TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
sub %g4, 1, %g5 ! %g5 page mask
srlx %g2, 64 - 40 + 13, %g2
sllx %g2, 13, %g2 ! %g2 real address
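! The shift pair above realigns the TTE's real-address field,
! bits 39:13, discarding the attribute bits above it; the low
! 13 bits stay zero so %g2 is page aligned.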
xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
RANGE_CHECK(%g6, %g2, %g4, 3f, %g5)
REAL_OFFSET(%g6, %g2, %g2, %g4)
4: or %g3, %g2, %g1 ! %g1 new tte with pa
#endif /* umm, N2HACK? */
#ifdef N2HACKS /* umm, N2HACK? */
or %o0, %o1, %g2 ! %g2 tag
mov MMU_TAG_ACCESS, %g3 ! %g3 tag_access
sllx %g4, NI_TTE4V_L_SHIFT, %g4
andn %g1, %g4, %g1 ! %g1 tte (force clear lock bit)
and %o2, TTE_SZ_MASK, %g7
sllx %g7, 3, %g7 ! * _MMUSONE_MAPx_INCR
add %g7, MMUSTAT_I+_MMUSONE_MAPN0, %g7
add %g7, MMUSTAT_I+_MMUSONE_MAP0, %g7
INC_MMU_STAT(%g5, %g7, %g4, %g6)
! XXXQ need to do MMUSTAT_D, check %o3
set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection
stxa %g1, [%g0]ASI_DTLB_DATA_IN
stxa %g1, [%g5]ASI_DTLB_DATA_IN
! condition codes still set
stxa %g1, [%g0]ASI_ITLB_DATA_IN
stxa %g1, [%g5]ASI_ITLB_DATA_IN
RANGE_CHECK_IO(%g6, %g2, %g4, 4b, 1f, %g1, %g5)
IN_RANGE(%g1, %g2, %g4, FIRE_A_BASE0, FIRE_A_OFFSET0, FIRE_A_SIZE0,
IN_RANGE(%g1, %g2, %g4, FIRE_B_BASE0, FIRE_B_OFFSET0, FIRE_B_SIZE0,
IN_RANGE(%g1, %g2, %g4, FIRE_A_BASE1, FIRE_A_OFFSET1, FIRE_A_SIZE1,
IN_RANGE(%g1, %g2, %g4, FIRE_B_BASE1, FIRE_B_OFFSET1, FIRE_B_SIZE1,
#endif /* CONFIG_IOBYPASS */
SET_SIZE(hcall_mmu_map_addr)
ENTRY_NP(hcall_mmu_unmap_addr)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o2, herr_inval)
set (NCTXS - 1), %g2 ! 8K page mask
ldxa [%g1]ASI_MMU, %g3 ! save current primary ctx
stxa %o1, [%g1]ASI_MMU ! switch to new ctx
stxa %g0, [%g2]ASI_IMMU_DEMAP
stxa %g0, [%g2]ASI_DMMU_DEMAP
2: stxa %g3, [%g1]ASI_MMU ! restore original primary ctx
SET_SIZE(hcall_mmu_unmap_addr)
* arg0/1 cpulist (%o0/%o1)
ENTRY_NP(hcall_mmu_demap_page)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_VA_CTX(%o2, %o3, herr_inval, %g2)
CHECK_MMU_FLAGS(%o4, herr_inval)
stxa %g0, [%g2]ASI_IMMU_DEMAP
stxa %g0, [%g2]ASI_DMMU_DEMAP
2: stxa %g3, [%g1]ASI_MMU ! restore primary ctx
SET_SIZE(hcall_mmu_demap_page)
* arg0/1 cpulist (%o0/%o1)
ENTRY_NP(hcall_mmu_demap_ctx)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_CTX(%o2, herr_inval, %g2)
CHECK_MMU_FLAGS(%o3, herr_inval)
set TLB_DEMAP_CTX_TYPE, %g3
stxa %g0, [%g3]ASI_IMMU_DEMAP
stxa %g0, [%g3]ASI_DMMU_DEMAP
2: stxa %g7, [%g2]ASI_MMU ! restore primary ctx
SET_SIZE(hcall_mmu_demap_ctx)
* arg0/1 cpulist (%o0/%o1)
ENTRY_NP(hcall_mmu_demap_all)
bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
CHECK_MMU_FLAGS(%o2, herr_inval)
set TLB_DEMAP_ALL_TYPE, %g3
stxa %g0, [%g3]ASI_IMMU_DEMAP
stxa %g0, [%g3]ASI_DMMU_DEMAP
SET_SIZE(hcall_mmu_demap_all)
* mappings: pointer to current mappings, not modified
* scr1, scr2, scr3, scr4: scratch
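*
* In outline: scan the fixed-size table from the last entry down;
* the first pass looks for a live entry whose tag matches
* (vaddr >> page shift) and bumps its refcnt, the second pass claims
* a free slot. Each entry is examined under its own MAPPING_LOCK,
* and a parallel insert of the same tag is resolved by rechecking
* the tag once the lock is held.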
#define UPDATE_PERM_MAPPINGS(mappings, vaddr, tte, scr1, scr2, scr3, scr4) \
/* first, try searching for an existing entry */ ;\
TTE_SHIFT_NOCHECK(tte, scr1, scr3) ;\
srlx vaddr, scr3, scr4 /* scr4 is current tag */ ;\
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), scr1 ;\
add mappings, scr1, scr2 ;\
MUTEX_ENTER(scr2 + MAPPING_LOCK, scr3) ;\
ldx [scr2 + MAPPING_TTE], scr3 ;\
ldx [scr2 + MAPPING_TAG], scr3 ;\
MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
subcc scr1, MAPPING_SIZE, scr1 ;\
brlz,pn scr1, 4f /* no matching entry found */ ;\
/* found a valid matching entry, update its refcnt */ ;\
ld [scr2 + MAPPING_REFCNT], scr3 ;\
st scr3, [scr2 + MAPPING_REFCNT] ;\
MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
/* second, try searching for a free entry */ ;\
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), scr1 ;\
add mappings, scr1, scr2 ;\
MUTEX_ENTER(scr2 + MAPPING_LOCK, scr3) ;\
ldx [scr2 + MAPPING_TTE], scr3 ;\
/* check tag, in case of parallel insert just update refcnt */ ;\
ldx [scr2 + MAPPING_TAG], scr3 ;\
MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
subcc scr1, MAPPING_SIZE, scr1 ;\
brlz,pn scr1, herr_inval /* no free entry found */ ;\
/* found a free entry, update its contents */ ;\
TTE_SHIFT_NOCHECK(tte, scr3, scr4) ;\
srlx vaddr, scr3, scr3 ;\
stx scr3, [scr2 + MAPPING_TAG] ;\
stx tte, [scr2 + MAPPING_TTE] ;\
st scr3, [scr2 + MAPPING_REFCNT] ;\
MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
* arg1 context (%o1) must be zero
ENTRY_NP(hcall_mmu_map_perm_addr)
CPU_GUEST_STRUCT(%g1, %g6)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o3, herr_inval)
TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
sub %g4, 1, %g5 ! %g5 page mask
srlx %g2, 64 - 40 + 13, %g2
sllx %g2, 13, %g2 ! %g2 real address
xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
RANGE_CHECK(%g6, %g2, %g4, herr_noraddr, %g5)
REAL_OFFSET(%g6, %g2, %g2, %g4)
or %g3, %g2, %g2 ! %g2 new tte with pa (XXX should be %g1)
* need to track the mappings: keep track of which cpus have
* the same mapping, and demap on each core only when all strands on
* that core have unmapped it
/* Search for existing perm mapping */
add %g6, GUEST_PERM_MAPPINGS, %g1
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
ldx [%g4 + MAPPING_TTE], %g5
mov %g4, %g6 ! %g6 = last free offset
7: subcc %g3, MAPPING_SIZE, %g3
brz,pn %g6, herr_toomany ! No free entry found
stx %o0, [%g6 + MAPPING_VA]
stx %o2, [%g6 + MAPPING_TTE]
stx %o3, [%g6 + MAPPING_FLAGS]
6: /* found a valid mapping, check tag */
ldx [%g4 + MAPPING_VA], %g5
ldx [%g4 + MAPPING_FLAGS], %g5
stx %g5, [%g4 + MAPPING_FLAGS]
CPU2CORE_STRUCT(%g1, %g6)
add %g6, CORE_PERM_I_MAPPINGS, %g6
UPDATE_PERM_MAPPINGS(%g6, %o0, %g2, %g3, %g4, %g5, %g7)
CPU2CORE_STRUCT(%g1, %g6)
add %g6, CORE_PERM_D_MAPPINGS, %g6
UPDATE_PERM_MAPPINGS(%g6, %o0, %g2, %g3, %g4, %g5, %g7)
sllx %g4, NI_TTE4V_L_SHIFT, %g4
or %g1, %g4, %g1 ! add lock bit
and %o2, TTE_SZ_MASK, %g7
sllx %g7, 3, %g7 ! * _MMUSONE_MAPx_INCR
add %g7, MMUSTAT_I+_MMUSONE_MAPN0, %g7
add %g7, MMUSTAT_I+_MMUSONE_MAP0, %g7
INC_MMU_STAT(%g5, %g7, %g4, %g6)
! XXXQ need to do MMUSTAT_D, check %o3
! XXXQ separate counts for perm?
set TLB_IN_4V_FORMAT, %g5 ! sun4v-style tte selection
stxa %g1, [%g0]ASI_ITLB_DATA_IN
stxa %g1, [%g5]ASI_ITLB_DATA_IN
! condition codes still set
stxa %g1, [%g0]ASI_DTLB_DATA_IN
stxa %g1, [%g5]ASI_DTLB_DATA_IN
SET_SIZE(hcall_mmu_map_perm_addr)
* mappings: pointer to current mappings, not modified
* scr1, scr2, scr3, scr4: scratch
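*
* Mirror of UPDATE_PERM_MAPPINGS: scan for a live entry whose tag
* matches the vaddr, drop its refcnt under MAPPING_LOCK, and clear
* the TTE to free the slot once the count reaches zero; a scan that
* runs off the end fails with ENOMAP.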
#define UNMAP_PERM_MAPPINGS(mappings, vaddr, scr1, scr2, scr3, scr4) \
/* XXX - ignore context */ ;\
mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), scr1 ;\
add mappings, scr1, scr2 ;\
MUTEX_ENTER(scr2 + MAPPING_LOCK, scr3) ;\
ldx [scr2 + MAPPING_TTE], scr3 ;\
TTE_SHIFT_NOCHECK(scr3, scr2, scr4) ;\
srlx vaddr, scr2, scr4 ;\
add mappings, scr1, scr2 ;\
ldx [scr2 + MAPPING_TAG], scr3 ;\
MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
subcc scr1, MAPPING_SIZE, scr1 ;\
brlz,pn scr1, herr_nomap /* no matching entry found */ ;\
ld [scr2 + MAPPING_REFCNT], scr3 ;\
st scr3, [scr2 + MAPPING_REFCNT] ;\
stx %g0, [scr2 + MAPPING_TTE] ;\
MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
ENTRY_NP(hcall_mmu_unmap_perm_addr)
CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
CHECK_MMU_FLAGS(%o2, herr_inval)
#if 0 /* XXX Need to update the list of perm mappings */
! if no mapping found, return ENOMAP
add %g6, CORE_PERM_I_MAPPINGS, %g6
UNMAP_PERM_MAPPINGS(%g6, %o0, %g1, %g3, %g4, %g5)
add %g6, CORE_PERM_D_MAPPINGS, %g6
UNMAP_PERM_MAPPINGS(%g6, %o0, %g1, %g3, %g4, %g5)
set (NCTXS - 1), %g2 ! 8K page mask
ldxa [%g1]ASI_MMU, %g3 ! save current primary ctx
stxa %o1, [%g1]ASI_MMU ! switch to new ctx
stxa %g0, [%g2]ASI_IMMU_DEMAP
stxa %g0, [%g2]ASI_DMMU_DEMAP
2: stxa %g3, [%g1]ASI_MMU ! restore original primary ctx
SET_SIZE(hcall_mmu_unmap_perm_addr)
* arg2 size (#entries, not #bytes) (%o2)
sllx %o2, Q_EL_SIZE_SHIFT, %g4 ! convert #entries to bytes
! size of 0 unconfigures queue
* Set the stored configuration to relatively safe values
* when un-initializing the queue
cmp %o2, MIN_QUEUE_ENTRIES
cmp %o2, MAX_QUEUE_ENTRIES
! check that size is a power of two
! Check base raddr alignment
sub %g4, 1, %g2 ! size in bytes to mask
bnz,pn %xcc, herr_badalign
ldx [%g1 + CPU_GUEST], %g6
RANGE_CHECK(%g6, %o1, %g4, herr_noraddr, %g2)
REAL_OFFSET(%g6, %o1, %g2, %g3)
! %g4 - queue size (#bytes)
cmp %o0, ERROR_RESUMABLE_QUEUE
cmp %o0, ERROR_NONRESUMABLE_QUEUE
stx %g2, [%g1 + CPU_ERRQNR_BASE]
stx %o1, [%g1 + CPU_ERRQNR_BASE_RA]
stx %o2, [%g1 + CPU_ERRQNR_SIZE]
stx %g4, [%g1 + CPU_ERRQNR_MASK]
mov ERROR_NONRESUMABLE_QUEUE_HEAD, %g3
mov ERROR_NONRESUMABLE_QUEUE_TAIL, %g3
stx %g2, [%g1 + CPU_ERRQR_BASE]
stx %o1, [%g1 + CPU_ERRQR_BASE_RA]
stx %o2, [%g1 + CPU_ERRQR_SIZE]
stx %g4, [%g1 + CPU_ERRQR_MASK]
mov ERROR_RESUMABLE_QUEUE_HEAD, %g3
mov ERROR_RESUMABLE_QUEUE_TAIL, %g3
stx %g2, [%g1 + CPU_DEVQ_BASE]
stx %o1, [%g1 + CPU_DEVQ_BASE_RA]
stx %o2, [%g1 + CPU_DEVQ_SIZE]
stx %g4, [%g1 + CPU_DEVQ_MASK]
mov DEV_MONDO_QUEUE_HEAD, %g3
mov DEV_MONDO_QUEUE_TAIL, %g3
stx %g2, [%g1 + CPU_CPUQ_BASE]
stx %o1, [%g1 + CPU_CPUQ_BASE_RA]
stx %o2, [%g1 + CPU_CPUQ_SIZE]
stx %g4, [%g1 + CPU_CPUQ_MASK]
mov CPU_MONDO_QUEUE_HEAD, %g3
mov CPU_MONDO_QUEUE_TAIL, %g3
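! Each queue configuration caches four values per vcpu: the
! translated base PA, the guest-supplied base RA (returned by the
! qinfo call), the size in entries, and a byte mask (size in bytes
! minus 1) for head/tail wraparound; the corresponding head and
! tail queue registers are then reset.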
* ret2 size (#entries) (%o2)
cmp %o0, ERROR_RESUMABLE_QUEUE
cmp %o0, ERROR_NONRESUMABLE_QUEUE
ldx [%g1 + CPU_ERRQNR_BASE_RA], %o1
ldx [%g1 + CPU_ERRQNR_SIZE], %o2
ldx [%g1 + CPU_ERRQR_BASE_RA], %o1
ldx [%g1 + CPU_ERRQR_SIZE], %o2
ldx [%g1 + CPU_DEVQ_BASE_RA], %o1
ldx [%g1 + CPU_DEVQ_SIZE], %o2
ldx [%g1 + CPU_CPUQ_BASE_RA], %o1
ldx [%g1 + CPU_CPUQ_SIZE], %o2
ENTRY_NP(hcall_cpu_start)
CPU_GUEST_STRUCT(%g6, %g7)
! Check pc (real) and tba (real) for validity
RANGE_CHECK(%g7, %o1, INSTRUCTION_SIZE, herr_noraddr, %g1)
RANGE_CHECK(%g7, %o2, REAL_TRAPTABLE_SIZE, herr_noraddr, %g1)
btst (INSTRUCTION_ALIGNMENT - 1), %o1 ! Check pc alignment
bnz,pn %xcc, herr_badalign
set REAL_TRAPTABLE_SIZE - 1, %g1
bnz,pn %xcc, herr_badalign
! Check current state of requested cpu
add %g1, %g2, %g1 ! %g1 = vcpus[n] offset
ldx [%g7 + %g1], %g1 ! %g1 = guest.vcpus[n]
!! %g1 requested CPU cpu struct
ldx [%g1 + CPU_STATUS], %g2
cmp %g2, CPU_STATE_STOPPED
/* Check to see if the mailbox is available */
add %g1, CPU_COMMAND, %g2
casxa [%g2]ASI_P, %g0, %g4
brnz,pn %g4, herr_wouldblock
stx %o1, [%g1 + CPU_CMD_ARG0]
stx %o2, [%g1 + CPU_CMD_ARG1]
stx %o3, [%g1 + CPU_CMD_ARG2]
#ifdef RESETCONFIG_BROKENTICK
stx %g2, [%g1 + CPU_CMD_ARG3]
mov CPU_CMD_STARTGUEST, %g2
stx %g2, [%g1 + CPU_COMMAND]
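! Mailbox protocol: the casxa above claims CPU_COMMAND only if it
! reads 0 (idle); the pc, tba and arg are deposited in the
! CPU_CMD_ARG slots first, and CPU_CMD_STARTGUEST is stored last so
! the target never sees a partially filled mailbox.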
SET_SIZE(hcall_cpu_start)
HCALL_RET(EBADTRAP) /* XXX */
ENTRY_NP(hcall_cpu_state)
VCPUID2CPUP(%g1, %o0, %g2, herr_nocpu, %g3)
ldx [%g2 + CPU_STATUS], %o1
! ASSERT(%o1 != CPU_STATE_INVALID)
cmp %o1, CPU_STATE_LAST_PUBLIC
movgu %xcc, CPU_STATE_ERROR, %o1 ! Any non-API state is ERROR
SET_SIZE(hcall_cpu_state)
* arg0 real address (%o0)
* EOK : success or partial success
* ENORADDR : invalid (bad) address
* EBADALIGN : bad alignment
* ret1 length scrubbed (%o1)
ENTRY_NP(hcall_mem_scrub)
brz,pn %o1, herr_inval ! length 0 invalid
or %o0, %o1, %g1 ! address and length
btst L2_LINE_SIZE - 1, %g1 ! aligned?
bnz,pn %xcc, herr_badalign ! no: error
CPU_GUEST_STRUCT(%g6, %g5)
/* Check input arguments with guest map: error ret: r0=ENORADDR */
RANGE_CHECK(%g5, %o0, %o1, herr_noraddr, %g1)
REAL_OFFSET(%g5, %o0, %o0, %g1) /* real => physical address */
ldx [%g6 + CPU_ROOT], %g2 ! root (config) struct
ldx [%g2 + CONFIG_MEMSCRUB_MAX], %g5 ! limit (# cache lines)
/* Compute max # lines: */
srlx %o1, L2_LINE_SHIFT, %g2 ! # input cache lines
cmp %g5, %g2 ! g2 = min(inp, max)
movlu %xcc, %g5, %g2 ! ..
sllx %g2, L2_LINE_SHIFT, %o1 ! ret1 = count scrubbed
* This is the core of this function.
* All of the code before and after has been optimized to make this,
* the most common path, as fast as possible.
wr %g0, ASI_BLK_INIT_P, %asi
stxa %g0, [%o0 + (0 * 8)]%asi
stxa %g0, [%o0 + (1 * 8)]%asi
stxa %g0, [%o0 + (2 * 8)]%asi
stxa %g0, [%o0 + (3 * 8)]%asi
stxa %g0, [%o0 + (4 * 8)]%asi
stxa %g0, [%o0 + (5 * 8)]%asi
stxa %g0, [%o0 + (6 * 8)]%asi
stxa %g0, [%o0 + (7 * 8)]%asi
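! The eight stores above zero one full 64-byte L2 line; Niagara's
! block-init store ASI avoids fetching the line's previous contents
! from memory, which is what makes this loop a cheap scrubber.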
bnz,pt %xcc, .ms_clear_mem
HCALL_RET(EOK) ! ret0=status, ret1=count
SET_SIZE(hcall_mem_scrub)
* arg0 real address (%o0)
* EOK : success, partial success
* EBADALIGN : bad alignment
brz,pn %o1, herr_inval ! len 0 not valid
set MEMSYNC_ALIGNMENT - 1, %g3
btst %g3, %g2 ! check for alignment of addr/len
bnz,pn %xcc, herr_badalign
RANGE_CHECK(%g5, %o0, %o1, herr_noraddr, %g1)
REAL_OFFSET(%g5, %o0, %o0, %g1) /* real => physical address ? */
* Clamp requested length at MEMSCRUB_MAX
ldx [%g5 + CPU_ROOT], %g2
ldx [%g2 + CONFIG_MEMSCRUB_MAX], %g3
sllx %g3, L2_LINE_SHIFT, %g3
!! %o1 MIN(requested length, max length)
* Push cache lines to memory
sub %o1, L2_LINE_SIZE, %o5
add %o0, %o5, %g1 ! hoisted delay slot (see below)
deccc L2_LINE_SIZE, %o5 ! get to next line
add %o0, %o5, %g1 ! %g1 is pa to flush
* arg0 dev handle [dev config pa] (%o0)
ENTRY_NP(hcall_intr_devino2sysino)
JMPL_DEVHANDLE2DEVOP(%o0, DEVOPSVEC_DEVINO2VINO, %g1, %g2, %g3, \
SET_SIZE(hcall_intr_devino2sysino)
* ret1 intr valid state (%o1)
ENTRY_NP(hcall_intr_getenabled)
JMPL_VINO2DEVOP(%o0, DEVOPSVEC_GETVALID, %g1, %g2, herr_inval)
SET_SIZE(hcall_intr_getenabled)
* arg1 intr valid state (%o1) 1: Valid 0: Invalid
ENTRY_NP(hcall_intr_setenabled)
cmp %o1, INTR_ENABLED_MAX_VALUE
JMPL_VINO2DEVOP(%o0, DEVOPSVEC_SETVALID, %g1, %g2, herr_inval)
SET_SIZE(hcall_intr_setenabled)
* ret1 (%o1) 0: idle 1: received 2: delivered
ENTRY_NP(hcall_intr_getstate)
JMPL_VINO2DEVOP(%o0, DEVOPSVEC_GETSTATE, %g1, %g2, herr_inval)
SET_SIZE(hcall_intr_getstate)
* arg1 (%o1) 0: idle 1: received 2: delivered
ENTRY_NP(hcall_intr_setstate)
JMPL_VINO2DEVOP(%o0, DEVOPSVEC_SETSTATE, %g1, %g2, herr_inval)
SET_SIZE(hcall_intr_setstate)
ENTRY_NP(hcall_intr_gettarget)
JMPL_VINO2DEVOP(%o0, DEVOPSVEC_GETTARGET, %g1, %g2, herr_inval)
SET_SIZE(hcall_intr_gettarget)
ENTRY_NP(hcall_intr_settarget)
JMPL_VINO2DEVOP(%o0, DEVOPSVEC_SETTARGET, %g1, %g2, herr_inval)
SET_SIZE(hcall_intr_settarget)
ENTRY_NP(hcall_cpu_yield)
#ifdef NIAGARA_ERRATUM_39
srlx %g1, VER_MASK_MAJOR_SHIFT, %g1
and %g1, VER_MASK_MAJOR_MASK, %g1
cmp %g1, 1 ! Check for Niagara 1.x
! xor ACTIVE to clear it on current strand
wr %g1, STR_STATUS_STRAND_ACTIVE, STR_STATUS_REG
SET_SIZE(hcall_cpu_yield)
ldub [%g1 + CPU_VID], %o1
* arg0 JBUS/DRAM performance register ID (%o0)
* ret1 Perf register value (%o1)
ENTRY_NP(hcall_niagara_getperf)
! check if JBUS/DRAM perf registers are accessible
set GUEST_PERFREG_ACCESSIBLE, %g2
brz,pn %g2, herr_noaccess
! check if perfreg within range
cmp %o0, NIAGARA_PERFREG_MAX
set niagara_perf_paddr_table - niagara_getperf_1, %g2
sllx %o0, 4, %o0 ! table entry offset
ldx [%g2], %g3 ! get perf reg paddr
ldx [%g3], %o1 ! read perf reg
SET_SIZE(hcall_niagara_getperf)
* arg0 JBUS/DRAM performance register ID (%o0)
* arg1 perf register value (%o1)
ENTRY_NP(hcall_niagara_setperf)
! check if JBUS/DRAM perf registers are accessible
set GUEST_PERFREG_ACCESSIBLE, %g2
brz,pn %g2, herr_noaccess
! check if perfreg within range
cmp %o0, NIAGARA_PERFREG_MAX
set niagara_perf_paddr_table - niagara_setperf_1, %g2
sllx %o0, 4, %o0 ! table entry offset
ldx [%g2], %g3 ! get perf reg paddr
ldx [%g2+8], %g1 ! get perf reg write mask
stx %g1, [%g3] ! write perf reg
SET_SIZE(hcall_niagara_setperf)
* Niagara JBUS/DRAM performance register physical address/mask table
* (order must match performance register ID assignment)
niagara_perf_paddr_table:
.xword JBI_PERF_CTL, 0xff
.xword JBI_PERF_COUNT, 0xffffffffffffffff
.xword DRAM_PERF_CTL0, 0xff
.xword DRAM_PERF_COUNT0, 0xffffffffffffffff
.xword DRAM_PERF_CTL1, 0xff
.xword DRAM_PERF_COUNT1, 0xffffffffffffffff
.xword DRAM_PERF_CTL2, 0xff
.xword DRAM_PERF_COUNT2, 0xffffffffffffffff
.xword DRAM_PERF_CTL3, 0xff
.xword DRAM_PERF_COUNT3, 0xffffffffffffffff
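! Table format: one 16-byte entry per register ID, the register's
! physical address followed by a mask of its writable bits, matching
! the "sllx %o0, 4" entry-offset computation in the accessors above.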
brz,pn %g2, herr_noaccess
RANGE_CHECK(%g1, %o0, 1, herr_noraddr, %g2)
REAL_OFFSET(%g1, %o0, %o1, %g2)
* arg0 physical address of routine to execute (%o0)
* ret0 status if noaccess, otherwise SEP (somebody else's problem) (%o0)
brz,pn %g2, herr_noaccess
/* caller executes "done" */
* arg0 ra of dump buffer (%o0)
* arg1 size of dump buffer (%o1)
* ret1 size on success (%o1), min size on EINVAL
ENTRY_NP(hcall_dump_buf_update)
* XXX What locking is required between multiple strands
* XXX making simultaneous conf calls?
* Any error unconfigures any currently configured dump buf
* so set to unconfigured now to avoid special error exit code.
set GUEST_DUMPBUF_SIZE, %g4
set GUEST_DUMPBUF_RA, %g4
set GUEST_DUMPBUF_PA, %g4
! Size of 0 unconfigures the dump
blu,a,pn %xcc, herr_inval
mov %g2, %o1 ! return min size on EINVAL
btst (DUMPBUF_ALIGNMENT - 1), %o0
bnz,pn %xcc, herr_badalign
RANGE_CHECK(%g1, %o0, %o1, herr_noraddr, %g2)
REAL_OFFSET(%g1, %o0, %g2, %g3)
set GUEST_DUMPBUF_SIZE, %g4
set GUEST_DUMPBUF_RA, %g4
set GUEST_DUMPBUF_PA, %g4
! XXX Need to put something in the buffer
ldx [%g5 + CPU_ROOT], %g5
ldx [%g5 + CONFIG_VERSION], %g1
ldx [%g5 + CONFIG_VERSIONLEN], %g3
! ASSERT(%g3 <= [GUEST_DUMPBUF_SIZE])
SET_SIZE(hcall_dump_buf_update)
* ret1 current dumpbuf ra (%o1)
* ret2 current dumpbuf size (%o2)
ENTRY_NP(hcall_dump_buf_info)
set GUEST_DUMPBUF_SIZE, %g4
set GUEST_DUMPBUF_RA, %g4
SET_SIZE(hcall_dump_buf_info)
* arg0/1 cpulist (%o0/%o1)
* arg2 ptr to 64-byte-aligned data to send (%o2)
ENTRY(hcall_cpu_mondo_send)
btst CPULIST_ALIGNMENT - 1, %o1
bnz,pn %xcc, herr_badalign
btst MONDO_DATA_ALIGNMENT - 1, %o2
bnz,pn %xcc, herr_badalign
CPU_GUEST_STRUCT(%g3, %g6)
sllx %o0, CPULIST_ENTRYSIZE_SHIFT, %g5
RANGE_CHECK(%g6, %o1, %g5, herr_noraddr, %g7)
REAL_OFFSET(%g6, %o1, %g1, %g7)
RANGE_CHECK(%g6, %o2, MONDO_DATA_SIZE, herr_noraddr, %g7)
REAL_OFFSET(%g6, %o2, %g2, %g5)
!! %g4 true for EWOULDBLOCK
cmp %g6, CPULIST_ENTRYDONE
be,a,pn %xcc, another_cpu
inc CPULIST_ENTRYSIZE, %g1
#if GUEST_VCPUS_INCR == 8
mulx %g6, GUEST_VCPUS_INCR, %g6
ldx [%g3 + CPU_GUEST], %g5
add %g5, GUEST_VCPUS, %g5
be,pn %xcc, herr_inval ! Sending to self is illegal
/* Check to see if the mailbox is available */
add %g6, CPU_COMMAND, %g5
casxa [%g5]ASI_P, %g0, %g7
brnz,a,pn %g7, another_cpu ! target is busy, try another
/* Copy the mondo data into the target cpu's incoming buffer */
stx %g7, [%g6 + CPU_CMD_ARG0]
stx %g7, [%g6 + CPU_CMD_ARG1]
stx %g7, [%g6 + CPU_CMD_ARG2]
stx %g7, [%g6 + CPU_CMD_ARG3]
stx %g7, [%g6 + CPU_CMD_ARG4]
stx %g7, [%g6 + CPU_CMD_ARG5]
stx %g7, [%g6 + CPU_CMD_ARG6]
stx %g7, [%g6 + CPU_CMD_ARG7]
mov CPU_CMD_GUESTMONDO_READY, %g7
stx %g7, [%g6 + CPU_COMMAND]
/* Send a xcall vector interrupt to the target cpu */
ldub [%g6 + CPU_PID], %g7
sllx %g7, INT_VEC_DIS_VCID_SHIFT, %g5
or %g5, VECINTR_XCALL, %g5
stxa %g5, [%g0]ASI_INTR_UDB_W
mov CPULIST_ENTRYDONE, %g7
brnz,pn %g4, herr_wouldblock ! If remaining then EAGAIN
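! Entries already marked CPULIST_ENTRYDONE are skipped on retry, so
! a guest that sees EWOULDBLOCK reissues the call with the same list
! until every target cpu has accepted its mondo.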
SET_SIZE(hcall_cpu_mondo_send)
#define TTRACE_RELOC_ADDR(addr, scr0, scr1) \
* arg0 ra of traptrace buffer (%o0)
* arg1 size of traptrace buffer in entries (%o1)
* ret1 minimum #entries on EINVAL, #entries on success (%o1)
ENTRY_NP(hcall_ttrace_buf_conf)
CPU_GUEST_STRUCT(%g1, %g2)
* Disable traptrace by restoring %htba to the original trap table;
* always do this first to make error returns easier.
setx htraptable, %g3, %g4
TTRACE_RELOC_ADDR(%g4, %g3, %g5)
! Clear buffer description
stx %g0, [%g1 + CPU_TTRACEBUF_SIZE] ! size must be first
stx %g0, [%g1 + CPU_TTRACEBUF_PA]
stx %g0, [%g1 + CPU_TTRACEBUF_RA]
* nentries (arg1) > 0 configures the buffer
* nentries == 0 disables traptrace and cleans up buffer config
btst TTRACE_ALIGNMENT - 1, %o0
bnz,pn %xcc, herr_badalign
! Check that #entries is >= TTRACE_MINIMUM_ENTRIES
cmp %o1, TTRACE_MINIMUM_ENTRIES
blu,a,pn %xcc, herr_inval
mov TTRACE_MINIMUM_ENTRIES, %o1
sllx %o1, TTRACE_RECORD_SZ_SHIFT, %g6 ! convert #entries to bytes
RANGE_CHECK(%g2, %o0, %g6, herr_noraddr, %g4)
REAL_OFFSET(%g2, %o0, %g3, %g4)
!! %g3 pa of traptrace buffer
stx %o0, [%g1 + CPU_TTRACEBUF_RA]
stx %g3, [%g1 + CPU_TTRACEBUF_PA]
stx %g6, [%g1 + CPU_TTRACEBUF_SIZE] ! size must be last
!! Initialize traptrace buffer header
mov TTRACE_RECORD_SIZE, %g2
stx %g2, [%g1 + CPU_TTRACE_OFFSET]
stx %g2, [%g3 + TTRACE_HEADER_OFFSET]
stx %g2, [%g3 + TTRACE_HEADER_LAST_OFF]
! %o1 return is the same as that passed in
SET_SIZE(hcall_ttrace_buf_conf)
* ret1 current traptrace buf ra (%o1)
* ret2 current traptrace buf size (%o2)
ENTRY_NP(hcall_ttrace_buf_info)
ldx [%g1 + CPU_TTRACEBUF_RA], %o1
ldx [%g1 + CPU_TTRACEBUF_SIZE], %o2
srlx %o2, TTRACE_RECORD_SZ_SHIFT, %o2 ! convert bytes to #entries
movrz %o2, %g0, %o1 ! ensure RA zero if size is zero
SET_SIZE(hcall_ttrace_buf_info)
* arg0 boolean: 0 = disable, non-zero = enable (%o0)
* ret1 previous enable state (0=disabled, 1=enabled) (%o1)
ENTRY_NP(hcall_ttrace_enable)
setx htraptracetable, %g1, %g2 ! %g2 = reloc'd &htraptracetable
TTRACE_RELOC_ADDR(%g2, %g1, %g3)
setx htraptable, %g1, %g3 ! %g3 = reloc'd &htraptable
TTRACE_RELOC_ADDR(%g3, %g1, %g4)
mov %g3, %g1 ! %g1 = (%o0 ? %g3 : %g2)
rdhpr %htba, %g4 ! %o1 = (%htba == %g2)
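! Enable/disable selects between two complete hypervisor trap
! tables: %htba points at htraptracetable while tracing and at
! htraptable otherwise, and the previous state is recovered by
! comparing the current %htba with the trace table address.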
* Check that the guest has previously provided a buf for this cpu
* Check here since by now %o1 will be properly set
TTRACE_CHK_BUF(%g2, %g3, herr_inval)
SET_SIZE(hcall_ttrace_enable)
* arg0 boolean: 0 = disable, non-zero = enable (%o0)
* ret1 previous freeze state (0=disabled, 1=enabled) (%o1)
ENTRY_NP(hcall_ttrace_freeze)
movrnz %o0, 1, %o0 ! normalize to formal bool
! race conditions between two CPUs updating this are not harmful
ldx [%g1 + GUEST_TTRACE_FRZ], %o1 ! current val for ret1
stx %o0, [%g1 + GUEST_TTRACE_FRZ]
SET_SIZE(hcall_ttrace_freeze)
* arg0 lower 16 bits stored in TTRACE_ENTRY_TAG (%o0)
* arg1 stored in TTRACE_ENTRY_F1 (%o1)
* arg2 stored in TTRACE_ENTRY_F2 (%o2)
* arg3 stored in TTRACE_ENTRY_F3 (%o3)
* arg4 stored in TTRACE_ENTRY_F4 (%o4)
ENTRY_NP(hcall_ttrace_addentry)
* Check that the guest has previously provided a buf for this cpu
* return EINVAL if not configured, ignore (EOK) if frozen
TTRACE_PTR(%g3, %g2, herr_inval, hret_ok)
rdpr %tl, %g4 ! %g4 holds current tl
sub %g4, 1, %g3 ! %g3 holds tl of caller
mov %g3, %g1 ! save for TL field fixup
movrz %g3, 1, %g3 ! minimum is TL=1
TTRACE_STATE(%g2, TTRACE_TYPE_GUEST, %g3, %g5)
stb %g1, [%g2 + TTRACE_ENTRY_TL] ! overwrite with calc'd TL
wrpr %g4, %tl ! restore trap level
sth %o0, [%g2 + TTRACE_ENTRY_TAG]
stx %o1, [%g2 + TTRACE_ENTRY_F1]
stx %o2, [%g2 + TTRACE_ENTRY_F2]
stx %o3, [%g2 + TTRACE_ENTRY_F3]
stx %o4, [%g2 + TTRACE_ENTRY_F4]
TTRACE_NEXT(%g2, %g3, %g4, %g5)
SET_SIZE(hcall_ttrace_addentry)
* hcall_set_rtba - set the current cpu's rtba
* ret1 previous rtba (%o1)
CPU_GUEST_STRUCT(%g1, %g2)
! Return prior rtba value
ldx [%g1 + CPU_RTBA], %o1
! Check rtba for validity
RANGE_CHECK(%g2, %o0, REAL_TRAPTABLE_SIZE, herr_noraddr, %g7)
set REAL_TRAPTABLE_SIZE - 1, %g3
bnz,pn %xcc, herr_badalign
stx %o0, [%g1 + CPU_RTBA]
* hcall_get_rtba - return the current cpu's rtba
ldx [%g1 + CPU_RTBA], %o1
* vdev_genintr - generate a virtual interrupt
ENTRY_NP(hcall_vdev_genintr)
VINO2DEVINST(%g1, %o0, %g2, herr_inval)
add %g1, GUEST_VDEV_STATE, %g2
add %g2, VDEV_STATE_MAPREG, %g2
and %o0, VINTR_INO_MASK, %o0 ! get INO bits
mulx %o0, MAPREG_SIZE, %g1
HVCALL(vdev_intr_generate)
SET_SIZE(hcall_vdev_genintr)
#endif /* CONFIG_BRINGUP */
setx 0xfff0c2c000, %g1, %g2
ldxa [%o0]ASI_PRIMARY_LITTLE, %o0
stxa %o1, [%o0]ASI_PRIMARY_LITTLE