* ========== Copyright Header Begin ==========================================
* Hypervisor Software File: vpci_fire.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* - Do not alter or remove copyright notices
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems, Inc. or the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
.ident "@(#)vpci_fire.s 1.51 07/07/17 SMI"
#include <sys/asm_linkage.h>
! -------------------------------------------------------------------
! Helper macros.  NOTE(review): this region appears elided; several
! macro bodies below end in line continuations whose remaining lines
! are not visible here, so comments describe only what is shown.
! -------------------------------------------------------------------
! Register number -> byte offset (regno * 8, i.e. 8-byte registers).
#define REGNO2OFFSET(no, off) sllx no, 3, off
! PCI device number -> Fire device number (pci * 16) -- presumably a
! 16-entry stride per device; confirm against the Fire documentation.
#define PCIDEV2FIREDEV(pci, fire) sllx pci, 4, fire
* CHK_FIRE_LINK_STATUS - Check status of Fire link. Returns status
* of 0 if the link is down.
* Delay Slot: not safe in a delay slot
! Visible body: loads the leaf's PCIE base into "status", reads the
! TLU status register, masks the link-status field and compares it
! with DATA_LINK_ACTIVE.  "status" and scr1 are both clobbered.
#define CHK_FIRE_LINK_STATUS(firecookie, status, scr1) \
ldx [firecookie + FIRE_COOKIE_PCIE], status ;\
set FIRE_PLC_TLU_CTB_TLR_TLU_STS, scr1 ;\
ldx [status + scr1], scr1 ;\
and scr1, FIRE_TLU_STS_STATUS_MASK, scr1 ;\
cmp scr1, FIRE_TLU_STS_STATUS_DATA_LINK_ACTIVE ;\
#if FIRE_MSIEQ_SIZE != 0x28
#error "FIRE_MSIEQ_SIZE changed, breaks the shifts below"
! MSIEQNUM2MSIEQ - compute the per-EQ control block address for EQ
! "num" starting from the cookie's MSI cookie (body elided here).
#define MSIEQNUM2MSIEQ(firecookie, num, msieq, scr1, scr2) \
ldx [firecookie + FIRE_COOKIE_MSICOOKIE], msieq ;\
inc FIRE_MSI_COOKIE_EQ, scr1 ;\
! Ordered to minimize wasted space
! Zero-initialized backing store, one set per Fire leaf (A and B):
! MSI event queues (FIRE_NEQS queues of FIRE_EQSIZE bytes each) and
! an IOTSB (IOMMU TSB) per leaf, plus a small 16-byte block used for
! a "virtual" interrupt-map entry.  The third BSS_GLOBAL argument is
! presumably the required alignment -- confirm against the macro.
BSS_GLOBAL(fire_a_equeue, (FIRE_NEQS * FIRE_EQSIZE), 512 KB)
BSS_GLOBAL(fire_a_iotsb, IOTSB_SIZE, 8 KB)
BSS_GLOBAL(fire_b_equeue, (FIRE_NEQS * FIRE_EQSIZE), 512 KB)
BSS_GLOBAL(fire_b_iotsb, IOTSB_SIZE, 8 KB)
BSS_GLOBAL(fire_virtual_intmap, 0x10, 0x10)
! fire_jbus_init_table: table of (value, register-offset) pairs used
! by the JBus-side init loop.  The consumer (see the loop fragments
! below) loads the data word from entry+0 and the offset from
! entry+8, and the table is terminated by a -1,-1 sentinel entry.
! NOTE(review): the FIRE_SIZE(...) entries pair a size mask with an
! offset-mask register rather than a base -- presumably BAR setup;
! confirm against the full (un-elided) source and the Fire manual.
DATA_GLOBAL(fire_jbus_init_table)
.xword FIRE_SIZE(EBUS_SIZE), FIRE_EBUS_OFFSET_MASK
.xword FIRE_BAR_V(EBUS), FIRE_EBUS_OFFSET_BASE
.xword FIRE_BAR(EBUS), FIRE_EBUS_OFFSET_BASE
.xword FIRE_BAR(CFGIO(A)), FIRE_PCIE_A_IOCON_OFFSET_BASE
.xword FIRE_BAR(MEM32(A)), FIRE_PCIE_A_MEM32_OFFSET_BASE
.xword FIRE_BAR(MEM64(A)), FIRE_PCIE_A_MEM64_OFFSET_BASE
.xword FIRE_BAR(MEM32(B)), FIRE_PCIE_B_MEM32_OFFSET_BASE
.xword FIRE_BAR(CFGIO(B)), FIRE_PCIE_B_IOCON_OFFSET_BASE
.xword FIRE_BAR(MEM64(B)), FIRE_PCIE_B_MEM64_OFFSET_BASE
! Per-leaf size masks and virtual base addresses for MEM32 / CFGIO /
! MEM64 apertures, leaf A first, then leaf B.
.xword FIRE_SIZE(MEM32_SIZE), FIRE_PCIE_A_MEM32_OFFSET_MASK
.xword FIRE_BAR_V(MEM32(A)), FIRE_PCIE_A_MEM32_OFFSET_BASE
.xword FIRE_SIZE(CFGIO_SIZE), FIRE_PCIE_A_IOCON_OFFSET_MASK
.xword FIRE_BAR_V(CFGIO(A)), FIRE_PCIE_A_IOCON_OFFSET_BASE
.xword FIRE_SIZE(MEM64_SIZE), FIRE_PCIE_A_MEM64_OFFSET_MASK
.xword FIRE_BAR_V(MEM64(A)), FIRE_PCIE_A_MEM64_OFFSET_BASE
.xword FIRE_SIZE(MEM32_SIZE), FIRE_PCIE_B_MEM32_OFFSET_MASK
.xword FIRE_BAR_V(MEM32(B)), FIRE_PCIE_B_MEM32_OFFSET_BASE
.xword FIRE_SIZE(CFGIO_SIZE), FIRE_PCIE_B_IOCON_OFFSET_MASK
.xword FIRE_BAR_V(CFGIO(B)), FIRE_PCIE_B_IOCON_OFFSET_BASE
.xword FIRE_SIZE(MEM64_SIZE), FIRE_PCIE_B_MEM64_OFFSET_MASK
.xword FIRE_BAR_V(MEM64(B)), FIRE_PCIE_B_MEM64_OFFSET_BASE
/* From Solaris Driver */
! Literal register values taken from the Solaris px driver: parity
! control, fatal-reset enables, error log/interrupt enables (all-ones
! writes to RW1C aliases clear logged errors), and control/status.
.xword 0x8000000000000000, FIRE_JBUS_PAR_CTL
.xword 0x000000000600c047, FIRE_JBC_FATAL_RESET_ENABLE_REG
.xword 0xffffffffffffffff, FIRE_JBC_LOGGED_ERROR_STATUS_REG_RW1C_ALIAS
.xword 0xffffffffffffffff, FIRE_JBC_INTERRUPT_MASK_REG
.xword 0xffffffffffffffff, FIRE_JBC_ERROR_LOG_EN_REG
.xword 0xffffffffffffffff, FIRE_JBC_ERROR_INT_EN_REG
.xword 0xfffc000000000000, FIRE_DLC_IMU_ICS_MEM_64_PCIE_OFFSET_REG
.xword 0x000007f513cb7000, FIRE_FIRE_CONTROL_STATUS
.xword -1,-1 /* End of Table */
SET_SIZE(fire_jbus_init_table)
! fire_leaf_init_table: (value, register-offset) pairs applied once
! per PCIE leaf (A and B) by the leaf init loop.  Same layout as
! fire_jbus_init_table: data at entry+0, offset at entry+8,
! terminated by a -1,-1 sentinel.
DATA_GLOBAL(fire_leaf_init_table)
! IMU error logging/interrupt enables; all-ones to the RW1C alias
! clears any already-logged errors.
.xword 0xffffffffffffffff, FIRE_DLC_IMU_ICS_IMU_ERROR_LOG_EN_REG
.xword 0xffffffffffffffff, FIRE_DLC_IMU_ICS_IMU_INT_EN_REG
.xword 0xffffffffffffffff, FIRE_DLC_IMU_ICS_IMU_LOGGED_ERROR_STATUS_REG_RW1C_ALIAS
.xword 0x0000000000000010, FIRE_DLC_ILU_CIB_ILU_LOG_EN
.xword 0x0000001000000010, FIRE_DLC_ILU_CIB_ILU_INT_EN
* Changes to the CTO field of FIRE_PLC_TLU_CTB_TLR_TLU_CTL
* need to be reflected in the Niagara JBI_TRANS_TIMEOUT
* register. See the setup_jbi routine in setup.s.
#ifdef FIRE_ERRATUM_20_18
* Also, see below where NPRW_EN is masked off for Fire 2.0 but
* is set here for Fire 2.1 and later.
.xword 0x00000000da130001, FIRE_PLC_TLU_CTB_TLR_TLU_CTL
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_OE_LOG
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_OE_ERR_RW1C_ALIAS
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_OE_INT_EN
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_TLR_DEV_CTL
.xword 0x0000000000000040, FIRE_PLC_TLU_CTB_TLR_LNK_CTL
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_UE_LOG
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_UE_INT_EN
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_CE_LOG
.xword 0xffffffffffffffff, FIRE_PLC_TLU_CTB_TLR_CE_INT_EN
! LPU (link physical unit) configuration: reset, debug, interrupt
! mask, link-layer config, flow control, ack/nak latency, replay
! timer, retry FIFO pointers, PHY masks and LTSSM configuration.
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_RST
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_DBG_CONFIG
.xword 0x00000000800000ff, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_INTERRUPT_MASK
.xword 0x0000000000000100, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LL_CONFIG
.xword 0x0000000000000003, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_FC_UP_CNTL
.xword 0x0000000000000070, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_ACKNAK_LATENCY
.xword 0x00000000000001bf, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_RPLAY_TMR_THHOLD
.xword 0x00000000ffff0000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_RTRY_FIFO_PTR
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_PHY_ERR_MSK
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_RX_PHY_MSK
.xword 0x0000000000000050, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_TX_PHY_MSK
.xword 0x00000000002dc6c0, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LTSSM_CONFIG2
.xword 0x000000000007a120, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LTSSM_CONFIG3
.xword 0x0000000000029c00, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LTSSM_CONFIG4
.xword 0x0000000000000800, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LTSSM_CONFIG5
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LTSSM_MSK
.xword 0x0000000000000000, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_GB_GL_MSK
! DMC/CRU debug selects, PEC interrupt enables, and MMU setup: full
! invalidate, TSB cleared, control value 0x703, interrupts enabled.
.xword 0xffffffffffffffff, FIRE_DLC_IMU_ICS_DMC_INTERRUPT_MASK_REG
.xword 0x0000000000000000, FIRE_DLC_CRU_DMC_DBG_SEL_A_REG
.xword 0x0000000000000000, FIRE_DLC_CRU_DMC_DBG_SEL_B_REG
.xword 0xffffffffffffffff, FIRE_DLC_ILU_CIB_PEC_INT_EN
.xword 0xffffffffffffffff, FIRE_DLC_MMU_INV
.xword 0x0000000000000000, FIRE_DLC_MMU_TSB
.xword 0x0000000000000703, FIRE_DLC_MMU_CTL
.xword 0xffffffffffffffff, FIRE_DLC_MMU_INT_EN
! NOTE(review): TLU_CTL and the ACKNAK/RPLAY registers appear a
! second time here (earlier entries above) -- possibly deliberate
! re-writes after LPU setup; confirm against the full source.
.xword 0x00000000da130001, FIRE_PLC_TLU_CTB_TLR_TLU_CTL
.xword 0x00000000a06bf035, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_GB_GL_CONFIG2
.xword 0x0000000000000070, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_ACKNAK_LATENCY
.xword 0x00000000000000f6, FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_RPLAY_TMR_THHOLD
.xword 0x0000000002000000, FIRE_DLC_CRU_DMC_PCIE_CFG
.xword 0x000000007fff0000, FIRE_DLC_IMU_ICS_MSI_32_ADDR_REG
.xword 0x00000003ffff0000, FIRE_DLC_IMU_ICS_MSI_64_ADDR_REG
.xword -1, -1 /* End of Table */
SET_SIZE(fire_leaf_init_table)
! -------------------------------------------------------------------
! Init-loop fragments (heavily elided in this view): walk the
! fire_jbus_init_table, then the fire_leaf_init_table (once per
! leaf), writing each table's data word to its register offset.
! -------------------------------------------------------------------
* %i0 - global config pointer
setx fire_jbus_init_table, %g5, %g3
ldx [%i0 + CONFIG_RELOC], %o0
!! %g3 = fire_jbus_init_table base
ldx [%g1 + FIRE_COOKIE_JBUS], %g4
!! %g3 = fire_init_table base
!! %g4 = Fire Base JBus PA
! Extract the Fire module revision from the JBus ID; an unsupported
! revision is reported and (presumably) skipped -- code elided here.
and %g2, FIRE_JBUS_ID_MR_MASK, %g2
PRINT("HV:Unsupported Fire Version\r\n")
! Table walk: each 16-byte entry is (data @ +0, register offset @ +8).
ldx [%g3 + 8], %g5 ! Offset
ldx [%g3 + 0], %g6 ! Data
! Per-leaf init: same table-walk shape, applied to both leaf bases.
setx fire_leaf_init_table, %g5, %g3
ldx [%i0 + CONFIG_RELOC], %o0
ldx [%g1 + FIRE_COOKIE_PCIE], %g4
ldx [%g1 + FIRE_COOKIE_PCIE+FIRE_COOKIE_SIZE], %g5
!! %g4 leaf A base address
!! %g5 leaf B base address
ldx [%g3 + 8], %g6 ! Offset
brz,pn %g2, 2f ! End of table?
ldx [%g3 + 0], %g2 ! Data
#ifdef FIRE_ERRATUM_20_18
* Don't set the NPWR_EN bit in TLU CTL for Fire 2.0
! NOTE(review): %l0 is set then immediately reloaded below --
! intervening instructions are elided in this view.
set FIRE_PLC_TLU_CTB_TLR_TLU_CTL, %l0
ldx [%g1 + FIRE_COOKIE_JBUS], %l0
brz,pn %l0, 4f ! shouldn't happen at this point
and %l0, FIRE_JBUS_ID_MR_MASK, %l0
set FIRE_TLU_CTL_NPWR_EN, %l0
! Setup Interrupt Mondo Data 0 register
set FIRE_DLC_IMU_RDS_MSI_INT_MONDO_DATA_0_REG, %g6
stx %g0, [%g4 + %g6] ! Leaf A
stx %g0, [%g5 + %g6] ! Leaf B
! Setup Interrupt Mondo Data 1 register
set FIRE_DLC_IMU_RDS_MSI_INT_MONDO_DATA_1_REG, %g6
! NOTE(review): %g2 is shifted by FIRE_DEVINO_SHIFT before each of
! the two stores -- whether the second shift is intentional cannot
! be judged from this elided view; confirm against the full source.
sllx %g2, FIRE_DEVINO_SHIFT, %g2
stx %g2, [%g4 + %g6] ! Leaf A
sllx %g2, FIRE_DEVINO_SHIFT, %g2
stx %g2, [%g5 + %g6] ! Leaf B
! Setup interrupt mappings
STRAND_STRUCT(%g4) /* FIXME: what does it want the PID for?*/
ldub [%g4 + STRAND_ID], %g2
sllx %g2, JPID_SHIFT, %g6
! Select a Fire Interrupt Controller
and %g2, (NFIREINTRCONTROLLERS - 1), %g2
add %g2, FIRE_INTR_CNTLR_SHIFT, %g2
sllx %g6, FIRE_INTMR_MDO_MODE_SHIFT, %g6
sllx %g6, FIRE_INTMR_V_SHIFT, %g6
! Program the interrupt mapping (and load the matching clear
! register base) for mondos 62/63 on both leaves.
ldx [%g1 + FIRE_COOKIE_INTMAP], %g3
stx %g5, [%g3 + %g4] ! leaf A, mondo 62
ldx [%g1 + FIRE_COOKIE_INTCLR], %g2
ldx [%g1 + FIRE_COOKIE_INTMAP+FIRE_COOKIE_SIZE], %g3
stx %g5, [%g3 + %g4] ! leaf B, mondo 62
ldx [%g1 + FIRE_COOKIE_INTCLR+FIRE_COOKIE_SIZE], %g2
ldx [%g1 + FIRE_COOKIE_INTMAP], %g3
stx %g5, [%g3 + %g4] ! leaf A, mondo 63
ldx [%g1 + FIRE_COOKIE_INTCLR], %g2
sllx %g6, FIRE_INTMR_V_SHIFT, %g6
ldx [%g1 + FIRE_COOKIE_INTMAP+FIRE_COOKIE_SIZE], %g3
stx %g5, [%g3 + %g4] ! leaf B, mondo 63
ldx [%g1 + FIRE_COOKIE_INTCLR+FIRE_COOKIE_SIZE], %g2
! MMU setup: point each leaf's TSB control register at its IOTSB
! (base | size encoding) and program the MMU control value.
set FIRE_DLC_MMU_CTL, %g5
set FIRE_DLC_MMU_TSB, %g6
! Fire Leaf A PCIE reg base
ldx [%g1 + FIRE_COOKIE_PCIE], %g3
! Fire Leaf B PCIE reg base
ldx [%g1 + FIRE_COOKIE_PCIE+FIRE_COOKIE_SIZE], %g4
set FIRE_MMU_CSR_VALUE, %g2
ldx [%g1 + FIRE_COOKIE_IOTSB], %g2
ldx [%g1 + FIRE_COOKIE_IOTSB+FIRE_COOKIE_SIZE], %g5
or %g2, FIRE_TSB_SIZE, %g2
or %g5, FIRE_TSB_SIZE, %g5
! Leaf A MMU_TSB_CTRL reg
! Leaf B MMU_TSB_CTRL reg
!! %g3 = FIRE_A PCIE Base
!! %g4 = FIRE_B PCIE Base
! MSI EQ base setup for both leaves.
ldx [%g1 + FIRE_COOKIE_MSIEQBASE], %g2
ldx [%g1 + FIRE_COOKIE_MSIEQBASE+FIRE_COOKIE_SIZE], %g5
setx MSI_EQ_BASE_BYPASS_ADDR, %g7, %g6
set FIRE_DLC_IMU_EQS_EQ_BASE_ADDRESS, %g6
#ifdef FIRE_ERRATUM_20_18
! -------------------------------------------------------------------
! Fire 2.0 erratum 20-18 handling: on 2.0 parts, wait for the link,
! probe config space of the device at BDF 2.0.0 on each leaf, and if
! it is a PLX 8532/8516 switch record the config PA in the cookie
! (and optionally run the PLX link-training workaround).
! -------------------------------------------------------------------
! BDF2DEV packs bus/dev/fn into a config-space address offset.
#define BDF2DEV(b, d, f) ((((b) << 8) | ((d) << 5) | (f)) << 8)
#define DV(v, d) (((d) << 16) | (v)) /* for 32-bit ASI_L */
#define VENDOR_PLX 0x10b5
#define DEVICE32_PLX8532 DV(VENDOR_PLX, 0x8532)
#define DEVICE32_PLX8516 DV(VENDOR_PLX, 0x8516)
/* Check Fire version for 2.0 */
ldx [%g1 + FIRE_COOKIE_JBUS], %l0
brz,pn %l0, .skip_plx_workaround ! shouldn't happen
and %l0, FIRE_JBUS_ID_MR_MASK, %l0
bne,pt %xcc, .skip_plx_workaround
/* Check if link is up, it should be for the PLX leaf on Ontario */
!! %g3 leaf A pcie base address
! Poll the TLU status for link-up, 50ms per iteration, 1s cap.
cmp %l4, 20 ! 20 * 50msec = 1 sec max delay
bge,pn %xcc, .skip_plx_leafa
CPU_MSEC_DELAY(50, %l5, %l6, %l7)
set FIRE_PLC_TLU_CTB_TLR_TLU_STS, %l0
and %l0, FIRE_TLU_STS_STATUS_MASK, %l0
cmp %l0, FIRE_TLU_STS_STATUS_DATA_LINK_ACTIVE
/* calculate PA of config address for BDF 2.0.0, offset 0 */
set BDF2DEV(2, 0, 0), %l0
ldx [%g1 + FIRE_COOKIE_CFG], %l2
!! %l1 PA of config address of BDF 2.0.0
/* fire link up, but downlink needs a little more time */
CPU_MSEC_DELAY(200, %l5, %l6, %l7)
/* Check for PLX 8532/8516 */
! NOTE(review): the two "set" instructions below load both device
! IDs into %l3 back-to-back; the comparison code between them is
! elided in this view.
set DEVICE32_PLX8532, %l3
set DEVICE32_PLX8516, %l3
bne,pn %xcc, .skip_plx_leafa
! Remember the PLX config PA so later config reads can use it.
stx %l1, [%g1 + FIRE_COOKIE_EXTRACFGRDADDRPA]
#ifdef PLX_ERRATUM_LINK_HACK
!! %l1 = plx config base addr
!! %l2 = fire leaf base addr
HVCALL(fire_plx_reset_hack)
#endif /* PLX_ERRATUM_LINK_HACK */
/* Check if link is up, it should be for the PLX leaf on Ontario */
!! %g4 leaf B pcie base address
! Leaf B: same sequence as leaf A above, using the leaf B cookie
! fields (offset by FIRE_COOKIE_SIZE).
cmp %l4, 20 ! 20 * 50msec = 1 sec delay max
bge,pn %xcc, .skip_plx_leafb
CPU_MSEC_DELAY(50, %l5, %l6, %l7)
set FIRE_PLC_TLU_CTB_TLR_TLU_STS, %l0
and %l0, FIRE_TLU_STS_STATUS_MASK, %l0
cmp %l0, FIRE_TLU_STS_STATUS_DATA_LINK_ACTIVE
/* calculate PA of config address for BDF 2.0.0, offset 0 */
set BDF2DEV(2, 0, 0), %l0
ldx [%g1 + FIRE_COOKIE_SIZE + FIRE_COOKIE_CFG], %l2
!! %l1 PA of config address of BDF 2.0.0
/* fire link up, but downlink needs a little more time */
CPU_MSEC_DELAY(200, %l5, %l6, %l7)
/* Check for PLX 8532/8516 */
set DEVICE32_PLX8532, %l3
set DEVICE32_PLX8516, %l3
bne,pn %xcc, .skip_plx_leafb
stx %l1, [%g1 + FIRE_COOKIE_SIZE + FIRE_COOKIE_EXTRACFGRDADDRPA]
#ifdef PLX_ERRATUM_LINK_HACK
!! %l1 = plx config base addr
!! %l2 = fire leaf base addr
!! %g3 = fire leaf A base
!! %g4 = fire leaf B base
HVCALL(fire_plx_reset_hack)
#endif /* PLX_ERRATUM_LINK_HACK */
* void fire_config_bypass(fire_dev_t *firep, bool_t enable);
* Configures the bypass mode of a given fire PCI-E root complex.
* %o1 = enable (true) / disable (false) bypass mode
! Visible body only loads the leaf's PCIE register base and the MMU
! control register offset; the remainder of the routine (including
! the actual control write and return) is elided in this view.
ENTRY(fire_config_bypass)
ldx [%o0 + FIRE_COOKIE_PCIE], %o2
set FIRE_DLC_MMU_CTL, %o4
SET_SIZE(fire_config_bypass)
#ifdef PLX_ERRATUM_LINK_HACK
* Workaround for PLX link training problem
! -------------------------------------------------------------------
! Constants and accessor macros for the PLX switch workaround.
! Port-status bits are a per-port bitmask; leaf B's mask is shifted
! up by PLX_HACK_STATUS_LEAFB_SHIFT when reported.
! NOTE(review): several macro bodies below are elided (a #define
! line ending in "\" followed directly by the next comment).
! -------------------------------------------------------------------
#define PLX_HACK_STATUS_LEAFB_SHIFT 4
#define PLX_HACK_STATUS_PORT1 0x1
#define PLX_HACK_STATUS_PORT2 0x2
#define PLX_HACK_STATUS_PORT8 0x4
#define PLX_HACK_STATUS_PORT9 0x8
! PLX per-port register block stride and register offsets.
#define PLX_PORT_OFFSET 0x1000
#define PLX_UE_STATUS_REG_OFFSET 0xfb8
#define PLX_UESR_TRAINING_ERROR 0x1
#define PLX_VC0_RSRC_STATUS_HI 0x162
#define PLX_VC0_RSRC_NEGPEND_SHIFT 0x1
#define PLX_PCIE_CAPABILITY_HI 0x6a
#define PLX_PCIE_PORTTYPE_MASK 0xf0
#define PLX_PCIE_PORTTYPE_UPSTREAM 0x50
! Standard PCI config-space offsets used below.
#define PLX_CONFIG_CMD 0x4
#define PLX_CONFIG_CMD_MEMENABLE 0x2
#define PLX_CONFIG_BAR0 0x10
* %l1 - config base physical address
* %l3 - mem32 base physical address
! Little-endian accessors: PLX_MEM_* go through the mem32 aperture
! (%l3 base), PLX_CFG_* through config space (%l1 base); all use
! ASI_P_LE for byte-order conversion.
/* Uses %l4/%l5 as scratch */
#define PLX_MEM_STOREB(value, offset) \
/* Uses %l4/%l5 as scratch */
#define PLX_MEM_STOREW(value, offset) \
stuha %l4, [%l3 + %l5]ASI_P_LE
/* Uses %l4/%l5 as scratch */
#define PLX_MEM_STOREL(value, offset) \
stuwa %l4, [%l3 + %l5]ASI_P_LE
/* Uses %l5 as scratch */
#define PLX_MEM_FETCHW(offset, dest) \
lduha [%l3 + %l5]ASI_P_LE, dest
/* Uses %l5 as scratch */
#define PLX_MEM_FETCHL(offset, dest) \
lduwa [%l3 + %l5]ASI_P_LE, dest
/* Uses %l4/%l5 as scratch */
#define PLX_CFG_STOREW(value, offset) \
stuha %l4, [%l1 + %l5]ASI_P_LE
#define PLX_CFG_STOREREGW(reg, offset) \
stuha reg, [%l1 + %l5]ASI_P_LE
/* Uses %l4/%l5 as scratch */
#define PLX_CFG_STOREL(value, offset) \
stuwa %l4, [%l1 + %l5]ASI_P_LE
#define PLX_CFG_STOREREGL(reg, offset) \
stuwa reg, [%l1 + %l5]ASI_P_LE
/* Uses %l5 as scratch */
#define PLX_CFG_FETCHB(offset, dest) \
/* Uses %l5 as scratch */
#define PLX_CFG_FETCHW(offset, dest) \
lduha [%l1 + %l5]ASI_P_LE, dest
/* Uses %l5 as scratch */
#define PLX_CFG_FETCHL(offset, dest) \
lduwa [%l1 + %l5]ASI_P_LE, dest
/* Uses %g5/%g6 as scratch, invokes PLX_FETCH */
! Reads the port's VC0 resource status and UE status register and
! tests the training-error bit; condition codes are left for the
! caller's branch (used with a following bset).
#define PLX_TRAINING_ERROR_nz(port) \
PLX_MEM_FETCHW(((port * PLX_PORT_OFFSET) + \
PLX_VC0_RSRC_STATUS_HI), %g5) ;\
srlx %g5, PLX_VC0_RSRC_NEGPEND_SHIFT, %g5 ;\
PLX_MEM_FETCHL(((port * PLX_PORT_OFFSET) + \
PLX_UE_STATUS_REG_OFFSET), %g6) ;\
btst PLX_UESR_TRAINING_ERROR, %g5
* fire_plx_reset_hack - work around PLX link training problem
* %l1 PLX config space address
* %g1 base of Fire state structures ("cookies")
* %l3-%l6,%g5,%g6 available
! Checks each PLX downstream port (1, 2, 8, 9) for logged link
! training errors; if any port failed, builds a per-port bitmask in
! %o0 and asks vbsc to reset the system.  Large portions of this
! routine are elided in this view.
ENTRY_NP(fire_plx_reset_hack)
* See if this hack has been disabled by the SP
ldx [%i0 + CONFIG_IGNORE_PLX_LINK_HACK], %l6
* Check for rev AA and upstream port
bne,pt %xcc, .fire_plx_reset_hack_done
PLX_CFG_FETCHW(PLX_PCIE_CAPABILITY_HI, %l6)
and %l6, PLX_PCIE_PORTTYPE_MASK, %l6
cmp %l6, PLX_PCIE_PORTTYPE_UPSTREAM
bne,pt %xcc, .fire_plx_reset_hack_done
* Get MEM32 base address for the appropriate leaf
setx FIRE_BAR(MEM32(A)), %l6, %l3
setx FIRE_BAR(MEM32(B)), %l6, %l4
cmp %l2, %g3 ! compare current leaf to leaf A addr
!! %l3 = plx mem32 base addr
/* It's probably not enabled to respond in cmd register */
! Turn on memory-space decoding so the mem32 accesses below work.
PLX_CFG_FETCHW(PLX_CONFIG_CMD, %g5)
or %g5, PLX_CONFIG_CMD_MEMENABLE, %g5
PLX_CFG_STOREREGW(%g5, PLX_CONFIG_CMD)
/* Map in PLX mem space BAR */
PLX_CFG_STOREREGL(%g0, PLX_CONFIG_BAR0)
mov PLX_CONFIG_BAR0, %l5 ! BAR0
lduwa [%l1 + %l5]ASI_P_LE, %g0 ! force completion of stores
* If the link is not up, and a training error was logged
!! %l3 = plx mem32 base addr
!! %l2 = fire pcie leaf base addr
!! %l1 = plx cfg base addr
* Check for link training errors
! Accumulate one status bit per failed port in %o0.
mov 0, %o0 ! failure flag
PLX_TRAINING_ERROR_nz(1) ! port 1
bset PLX_HACK_STATUS_PORT1, %o0
0: PLX_TRAINING_ERROR_nz(2) ! port 2
bset PLX_HACK_STATUS_PORT2, %o0
0: PLX_TRAINING_ERROR_nz(8) ! port 8
bset PLX_HACK_STATUS_PORT8, %o0
0: PLX_TRAINING_ERROR_nz(9) ! port 9
bset PLX_HACK_STATUS_PORT9, %o0
brz,pt %o0, .fire_plx_reset_hack_done
* PLX encountered a link training problem, tell vbsc to reset
* %o0 contains a bitmask of ports on the plx that failed
* For leaf B we shift the bitmask up by 4 bits:
* |B9|B8|B2|B1|A9|A8|A2|A1|
movne %xcc, PLX_HACK_STATUS_LEAFB_SHIFT, %g1
/* spin until vbsc resets the system */
.fire_plx_reset_hack_done:
! Restore the command register (memory decoding off again).
PLX_CFG_STOREREGW(%g0, PLX_CONFIG_CMD)
SET_SIZE(fire_plx_reset_hack)
#endif /* PLX_ERRATUM_LINK_HACK */
* %g1 Fire Cookie Pointer
* arg0 dev config pa (%o0)
! fire_devino2vino: translate a device INO to a virtual INO using
! the cookie's handle/inomax/vino fields (body elided in this view).
ENTRY_NP(fire_devino2vino)
!! %g1 pointer to FIRE_COOKIE
ldx [%g1 + FIRE_COOKIE_HANDLE], %g2
lduh [%g1 + FIRE_COOKIE_INOMAX], %g3
lduh [%g1 + FIRE_COOKIE_VINO], %g4
SET_SIZE(fire_devino2vino)
* %g1 Fire Cookie Pointer
* ret1 intr valid state (%o1)
! fire_intr_getvalid: read the interrupt-map entry for the devino in
! %o0 and report whether it is enabled.  movrlz: %o1 = INTR_ENABLED
! when the loaded mapping register is negative (V bit is bit 63).
ENTRY_NP(fire_intr_getvalid)
!! %g1 pointer to FIRE_COOKIE
ldx [%g1 + FIRE_COOKIE_INTMAP], %g2
and %o0, FIRE_DEVINO_MASK, %g4
movrlz %g5, INTR_ENABLED, %o1
SET_SIZE(fire_intr_getvalid)
* %g1 Fire Cookie Pointer
! _fire_intr_setvalid: internal helper; sets/clears the V bit of the
! interrupt-map entry for devino %g2 according to %g3.
! NOTE(review): the two back-to-back sllx writes into %g6 suggest
! intervening (elided) instructions; confirm against the full source.
ENTRY_NP(_fire_intr_setvalid)
!! %g1 = pointer to FIRE_COOKIE
ldx [%g1 + FIRE_COOKIE_INTMAP], %g6
and %g2, FIRE_DEVINO_MASK, %g4
sllx %g6, FIRE_INTMR_V_SHIFT, %g6
sllx %g3, FIRE_INTMR_V_SHIFT, %g6
SET_SIZE(_fire_intr_setvalid)
* %g1 Fire Cookie Pointer
* arg1 intr valid state (%o1) 1: Valid 0: Invalid
! fire_intr_setvalid: public wrapper; delegates to _fire_intr_setvalid.
ENTRY_NP(fire_intr_setvalid)
!! %g1 = pointer to FIRE_COOKIE
HVCALL(_fire_intr_setvalid)
SET_SIZE(fire_intr_setvalid)
* %g1 Fire Cookie Pointer
* ret1 (%o1) 1: Pending / 0: Idle
! fire_intr_getstate: read the interrupt-clear register for the
! devino in %o0 and map the hardware state onto the hypervisor's
! INTR_IDLE / INTR_RECEIVED / INTR_DELIVERED values via conditional
! moves on (%g3 - FIRE_INTR_RECEIVED) and %g3 itself.
ENTRY_NP(fire_intr_getstate)
!! %g1 pointer to FIRE_COOKIE
ldx [%g1 + FIRE_COOKIE_INTCLR], %g2
and %o0, FIRE_DEVINO_MASK, %g4
sub %g3, FIRE_INTR_RECEIVED, %g4
movrz %g4, INTR_DELIVERED, %o1
movrnz %g4, INTR_RECEIVED, %o1
movrz %g3, INTR_IDLE, %o1
SET_SIZE(fire_intr_getstate)
* %g1 Fire Cookie Pointer
* arg1 (%o1) 1: Pending / 0: Idle XXX
! fire_intr_setstate: public wrapper; delegates to _fire_intr_setstate.
ENTRY_NP(fire_intr_setstate)
!! %g1 pointer to FIRE_COOKIE
HVCALL(_fire_intr_setstate)
SET_SIZE(fire_intr_setstate)
! _fire_intr_setstate: internal helper; translate the requested
! state in %g3 (0 -> FIRE_INTR_IDLE, nonzero -> FIRE_INTR_RECEIVED)
! and write it to the devino's interrupt-clear register (store
! instruction elided in this view).
ENTRY_NP(_fire_intr_setstate)
ldx [%g1 + FIRE_COOKIE_INTCLR], %g5
and %g2, FIRE_DEVINO_MASK, %g4
movrz %g3, FIRE_INTR_IDLE, %g3
movrnz %g3, FIRE_INTR_RECEIVED, %g3
SET_SIZE(_fire_intr_setstate)
* %g1 Fire Cookie Pointer
! fire_intr_gettarget: return (in %o1) the virtual CPU id currently
! targeted by the devino's interrupt mapping; uses the internal
! helper to obtain the physical target, then maps PID -> vcpu.
ENTRY_NP(fire_intr_gettarget)
!! %g1 pointer to FIRE_COOKIE
HVCALL(_fire_intr_gettarget)
PID2VCPUP(%g3, %g4, %g5, %g6)
ldub [%g4 + CPU_VID], %o1
SET_SIZE(fire_intr_gettarget)
! _fire_intr_gettarget: read the interrupt-map entry for devino %g2
! and extract the JPID field (target strand) from it.
ENTRY_NP(_fire_intr_gettarget)
ldx [%g1 + FIRE_COOKIE_INTMAP], %g3
and %g2, FIRE_DEVINO_MASK, %g4
srlx %g3, JPID_SHIFT, %g3
/* FIXME: What is this trying to do ?! */
ldx [%g3 + CONFIG_VCPUS], %g3
VCPU2STRAND_STRUCT(%g3, %g3)
ldub [%g3 + STRAND_ID], %g3
SET_SIZE(_fire_intr_gettarget)
* %g1 Fire Cookie Pointer
! fire_intr_settarget: retarget the devino in %o0 at the vcpu in
! %o1: validate the vcpu, reject CPUs in error, resolve its strand
! id, then delegate to _fire_intr_settarget.
ENTRY_NP(fire_intr_settarget)
!! %g1 pointer to FIRE_COOKIE
VCPUID2CPUP(%g3, %o1, %g4, herr_nocpu, %g5)
IS_CPU_IN_ERROR(%g4, %g5)
be,pn %xcc, herr_cpuerror
VCPU2STRAND_STRUCT(%g4, %g3)
ldub [%g3 + STRAND_ID], %g3
and %o0, FIRE_DEVINO_MASK, %g2
HVCALL(_fire_intr_settarget)
SET_SIZE(fire_intr_settarget)
* %g3 = Physical CPU number
! _fire_intr_settarget: rewrite the devino's interrupt-map entry
! with the new target JPID and a freshly selected interrupt
! controller.  NOTE(review): the first sllx result in %g6 is
! immediately overwritten by the second -- intervening instructions
! appear to be elided; confirm against the full source.
ENTRY_NP(_fire_intr_settarget)
ldx [%g1 + FIRE_COOKIE_INTMAP], %g4
!! %g3 Physical CPU number
sllx %g6, JPID_SHIFT, %g6
sllx %g3, JPID_SHIFT, %g6
! Clear Interrupt Controller bits
andn %g5, (FIRE_INTR_CNTLR_MASK << FIRE_INTR_CNTLR_SHIFT), %g5
! Select a Fire Interrupt Controller
and %g3, (NFIREINTRCONTROLLERS - 1), %g3
add %g3, FIRE_INTR_CNTLR_SHIFT, %g3
sllx %g6, FIRE_INTMR_MDO_MODE_SHIFT, %g6
SET_SIZE(_fire_intr_settarget)
* %g1 Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg3 tte attributes (%o3)
* arg4 io_page_list_p (%o4)
* ret1 #ttes mapped (%o1)
! IOMMU map fragment (entry point label elided in this view):
! validates the page-list alignment, clamps the count to
! IOMMU_MAP_MAX, converts the guest RA page list to PAs, and loops
! writing valid IOTTEs into the IOTSB, flushing the IOMMU each time.
!! %g1 pointer to FIRE_COOKIE
and %o3, HVIO_TTE_ATTR_MASK, %g7
! Check io_page_list_p alignment
! and make sure it is 8 byte aligned
bnz,pn %xcc, herr_badalign
ldx [%g1 + FIRE_COOKIE_IOTSB], %g5
movgu %xcc, IOMMU_MAP_MAX, %o2
! Check to ensure the end of the mapping is still within
inc %g3 ! make sure last mapping succeeds.
sllx %o2, IOTTE_SHIFT, %g6
RA2PA_RANGE_CONV_UNK_SIZE(%g2, %o4, %g6, herr_noraddr, %g7, %g3)
ldx [%g1 + FIRE_COOKIE_MMU], %g1
sllx %g7, FIRE_IOTTE_V_SHIFT, %g7
!! %g1 = Fire MMU Reg Block Base
!! %o3 = TTE Attributes + Valid Bit
sllx %o1, IOTTE_SHIFT, %o1
!! %g1 = Fire MMU Reg Block Base
!! %o3 = TTE Attributes + Valid Bit
! Page-align each RA and translate/authorize it (direct RA range or
! LDC-granted page) before forming the IOTTE.
srlx %g3, FIRE_PAGESIZE_8K_SHIFT, %o0
sllx %o0, FIRE_PAGESIZE_8K_SHIFT, %o0
bne,pn %xcc, .fire_badalign
RA2PA_RANGE_CONV(%g2, %o0, %g4, .fire_check_ldc_ra, %g7, %g3)
LDC_IOMMU_GET_PA(%g2, %o0, %g3, %g7, .fire_noraddr, .fire_noaccess)
!! %g1 = Fire MMU Reg Block Base
!! %o3 = TTE Attributes + Valid Bit
and %g5, (1 << 6) - 1, %o0
stx %g5, [%g1+0x100] ! IOMMU Flush
add %g5, IOTTE_SIZE, %g5 ! *IOTSB++
add %g6, IOTTE_SIZE, %g6 ! *PAGELIST++
brgz,pt %o2, .fire_iommu_map_loop
brz,pn %o1, herr_noaccess
brz,pn %o1, herr_badalign
* %g1 Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg3 tte attributes (%o3)
* arg4 io_page_list_p (%o4)
* ret1 #ttes mapped (%o1)
! fire_iommu_map_v2: v2 entry point -- masks the attributes and
! (presumably) falls into the common map path above, which is
! elided here.  NOTE(review): %g7 is loaded with the V2 mask but
! the visible "and" uses HVIO_TTE_ATTR_MASK, not %g7 -- confirm
! against the full source whether that is intentional.
ENTRY_NP(fire_iommu_map_v2)
set HVIO_TTE_ATTR_MASK_V2, %g7
and %o3, HVIO_TTE_ATTR_MASK, %o3
SET_SIZE(fire_iommu_map_v2)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
! fire_iommu_getmap: read back the IOTTE for the tsbindex in %o1,
! recover the mapped PA (sign-stripped via the shift pair below),
! convert it PA -> RA for the guest, and return the attribute bits;
! attributes are cleared when the TTE's V bit (bit 63) is not set.
ENTRY_NP(fire_iommu_getmap)
ALTENTRY(fire_iommu_getmap_v2)
!! %g1 pointer to FIRE_COOKIE
ldx [%g1 + FIRE_COOKIE_IOTSB], %g5
sllx %o1, IOTTE_SHIFT, %g2
!! %g1 = Fire Cookie Pointer
! Isolate the JBus PA field of the TTE and page-align it.
sllx %g5, (64-JBUS_PA_SHIFT), %g3
srlx %g3, (64-JBUS_PA_SHIFT+FIRE_PAGESIZE_8K_SHIFT), %g3
sllx %g3, FIRE_PAGESIZE_8K_SHIFT, %g3
PA2RA_CONV(%g2, %g3, %o2, %g7, %g4) ! PA -> RA (%o2)
brnz %g4, herr_nomap /* invalid translation */
and %g5, HVIO_TTE_ATTR_MASK, %o1
movrgez %g5, 0, %o1 ! Clear the attributes if V=0
SET_SIZE(fire_iommu_getmap)
* %g1 Fire Cookie Pointer
* arg0 dev config pa (%o0)
* ret1 #ttes demapped (%o1)
! fire_iommu_unmap: clamp the demap count to IOMMU_MAP_MAX and
! invalidate IOTTEs starting at tsbindex %o1 (the loop body and the
! TSB flush are elided in this view).
ENTRY_NP(fire_iommu_unmap)
!! %g1 pointer to FIRE_COOKIE
ldx [%g1 + FIRE_COOKIE_IOTSB], %g5
movgu %xcc, IOMMU_MAP_MAX, %o2
inc %g3 ! make sure last mapping succeeds.
sllx %o1, IOTTE_SHIFT, %g2
!! %g1 = Fire Cookie Pointer
!! %o1 = #ttes unmapped so far
ldx [%g1 + FIRE_COOKIE_MMU], %g1
srlx %g4, FIRE_PAGESIZE_8K_SHIFT, %g4
sllx %g4, FIRE_PAGESIZE_8K_SHIFT, %g4
! Flush Fire TSB here XXXX
SET_SIZE(fire_iommu_unmap)
! fire_iommu_getbypass: translate a guest RA (%o1) to an IOMMU
! bypass-mode IO address: verify bypass is enabled in the Fire MMU
! control register, validate the attributes and RA, then OR in the
! bypass base address.
ENTRY_NP(fire_iommu_getbypass)
!! %g1 pointer to FIRE_COOKIE
! Check to see if bypass is allowed
! (We could check the pcie structure, but what better way
! than to check and see what Fire itself has enabled after config)
! FIXME: Note S10U3 has a bug in the px driver that will assume
! bypass is available if anything other than ENOTSUPP is returned
! .. so if the other tests also fail and return EINVAL or EBADRADDR
! *before* the bypass enable test then Solaris assumes bypass *is*
! supported. For this reason, the not supported test must be first.
ldx [%g1 + FIRE_COOKIE_PCIE], %g4
set FIRE_DLC_MMU_CTL, %g5
andcc %g5, FIRE_MMU_CSR_BE, %g0
be,pn %xcc, herr_notsupported
! Reject attribute bits outside HVIO_IO_ATTR_MASK, then validate
! the RA before forming the bypass address.
andncc %o2, HVIO_IO_ATTR_MASK, %g0
RA2PA_RANGE_CONV(%g2, %o1, 1, herr_noraddr, %g4, %g3)
setx FIRE_IOMMU_BYPASS_BASE, %g5, %g4
SET_SIZE(fire_iommu_getbypass)
* arg0 dev config pa (%o0)
! fire_config_get: read from PCIE config space.  Fails fast when the
! leaf is blacklisted or the link is down; otherwise disables
! unclaimed-read error reporting around the little-endian access
! (size-specific load selected by elided dispatch code).
ENTRY_NP(fire_config_get)
!! %g1 pointer to FIRE_COOKIE
! If leaf is blacklisted fail access
lduw [%g1 + FIRE_COOKIE_BLACKLIST], %g3
brnz,a,pn %g3, .skip_config_get
ldx [%g1 + FIRE_COOKIE_CFG], %g3
!! %g2 = PCIE config space offset
!! %g3 = CFG base address
!! %o2 = Error return value
CHK_FIRE_LINK_STATUS(%g1, %g5, %g6)
brz,pn %g5, .skip_config_get
!! %g2 = PCIE config space offset
!! %g3 = CFG base address
DISABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
lduwa [%g3 + %g2]ASI_P_LE, %o2
lduha [%g3 + %g2]ASI_P_LE, %o2
ENABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
SET_SIZE(fire_config_get)
* arg0 dev config pa (%o0)
! fire_config_put: write to PCIE config space; same blacklist and
! link-status guards as fire_config_get.  Under erratum 20-18 the
! extra config-read address recorded earlier is used (presumably as
! an error barrier after the store -- code elided here).
ENTRY_NP(fire_config_put)
!! %g1 pointer to FIRE_COOKIE
! If leaf is blacklisted fail access
lduw [%g1 + FIRE_COOKIE_BLACKLIST], %g3
brnz,a,pn %g3, .skip_config_put
ldx [%g1 + FIRE_COOKIE_CFG], %g3
!! %g2 = PCIE config space offset
!! %g3 = CFG base address
CHK_FIRE_LINK_STATUS(%g1, %g5, %g6)
brz,pn %g5, .skip_config_put
!! %g2 = PCIE config space offset
!! %g3 = CFG base address
DISABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
stwa %o4, [%g3 + %g2]ASI_P_LE
stha %o4, [%g3 + %g2]ASI_P_LE
#ifdef FIRE_ERRATUM_20_18
ldx [%g1 + FIRE_COOKIE_EXTRACFGRDADDRPA], %g6
andn %g2, PCI_CFG_OFFSET_MASK, %g2
ENABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
SET_SIZE(fire_config_put)
* arg0 (%g1) = Fire Cookie
* arg3 (%g3) = size (1, 2, 4)
* ret0 = status (1 fail, 0 pass)
! Internal (register-windowless) config read: little-endian access
! of the requested size with unclaimed-error reporting disabled.
! Entry label and size dispatch are elided in this view.
!! %g1 = fire cookie (pointer)
!! %g3 = size (1 byte, 2 bytes, 4 bytes)
DISABLE_PCIE_RWUC_ERRORS(%g1, %g4, %g5, %g6)
ldx [%g1 + FIRE_COOKIE_CFG], %g4
lduha [%g4 + %g2]ASI_P_LE, %g3
lduwa [%g4 + %g2]ASI_P_LE, %g3
ENABLE_PCIE_RWUC_ERRORS(%g1, %g4, %g2, %g6)
* bool_t pci_config_get(uint64_t firep, uint64_t offset, int size,
! C-callable wrapper: preserves the globals it will clobber on the
! strand stack before delegating to the internal routine.
STRAND_PUSH(%g2, %g6, %g7)
STRAND_PUSH(%g3, %g6, %g7)
STRAND_PUSH(%g4, %g6, %g7)
* arg0 (%o0) = Fire Cookie
* arg3 (%o2) = size (1, 2, 4)
* ret0 (%o0)= status (1 fail, 0 pass)
* %g1, %g5, %g6, %g7 Clobbered.
* bool_t pci_config_put(uint64_t firep, uint64_t offset, int size,
!! %o0 = fire cookie (pointer)
!! %o2 = size (1 byte, 2 bytes, 4 bytes)
ldx [%o0 + FIRE_COOKIE_CFG], %o4
DISABLE_PCIE_RWUC_ERRORS(%o0, %g5, %g7, %g1)
#ifdef FIRE_ERRATUM_20_18
! Erratum 20-18: use the recorded extra config-read PA (presumably
! as a post-store error barrier -- surrounding code elided).
ldx [%o0 + FIRE_COOKIE_EXTRACFGRDADDRPA], %g5
andn %o1, PCI_CFG_OFFSET_MASK, %g1
ENABLE_PCIE_RWUC_ERRORS(%o0, %g5, %g7, %g1)
* arg0 (%g1) = Fire Cookie
* arg3 (%g3) = size (1, 2, 4)
* ret0 = status (1 fail, 0 pass)
! Internal IO-space peek: same error-disable bracket as the config
! read above (access instructions elided in this view).
!! %g1 = fire cookie (pointer)
!! %g3 = size (1 byte, 2 bytes, 4 bytes)
DISABLE_PCIE_RWUC_ERRORS(%g1, %g4, %g5, %g6)
ldx [%g1 + FIRE_COOKIE_CFG], %g4
ENABLE_PCIE_RWUC_ERRORS(%g1, %g4, %g2, %g6)
* bool_t pci_io_peek(uint64_t firep, uint64_t address, int size,
STRAND_PUSH(%g2, %g6, %g7)
STRAND_PUSH(%g3, %g6, %g7)
STRAND_PUSH(%g4, %g6, %g7)
* arg0 (%o0) = Fire Cookie
* arg2 (%o2) = size (1, 2, 4)
* arg4 (%o4) = PCI device
* ret0 (%o0)= status (1 fail, 0 pass)
* %g1, %g5, %g6, %g7 Clobbered.
* bool_t pci_io_put(uint64_t firep, uint64_t address, int size,
!! %o0 = fire cookie (pointer)
!! %o2 = size (1 byte, 2 bytes, 4 bytes)
!! %o4 = Config space offset for device
DISABLE_PCIE_RWUC_ERRORS(%o0, %g5, %g7, %g1)
#ifdef FIRE_ERRATUM_20_18
ldx [%o0 + FIRE_COOKIE_EXTRACFGRDADDRPA], %g5
! Read from PCI config space as error barrier
ldx [%o0 + FIRE_COOKIE_CFG], %g5
ENABLE_PCIE_RWUC_ERRORS(%o0, %g5, %g7, %g1)
* %g1 = Fire Cookie Pointer
* arg3 direction (%o3) (one or both of 1: for device 2: for cpu)
* ret1 #bytes synced (%o1)
RA2PA_RANGE_CONV_UNK_SIZE(%g2, %o1, %o2, herr_noraddr, %g4, %g3)
* %g1 = Fire Cookie Pointer
RANGE_CHECK_IO(%g2, %o1, %o2, .fire_io_peek_found, herr_noraddr,
CHK_FIRE_LINK_STATUS(%g1, %g5, %g6)
brz,a,pn %g5, .skip_io_peek
DISABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
1: set STRAND_IO_PROT, %g6
ENABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
* %g1 = Fire Cookie Pointer
ldx [%g1 + FIRE_COOKIE_CFG], %g3
RANGE_CHECK_IO(%g2, %o1, %o2, .fire_io_poke_found, herr_noraddr,
CHK_FIRE_LINK_STATUS(%g1, %g5, %g6)
brz,a,pn %g5, .skip_io_poke
!! %g3 = CFG base address
DISABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
! Read from PCI config space
ENABLE_PCIE_RWUC_ERRORS(%g1, %g5, %g6, %g7)
! fire_mondo_receive - interrupt mondo receipt handler; the whole body is
! elided from this chunk.
ENTRY_NP(fire_mondo_receive)
SET_SIZE(fire_mondo_receive)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
! NOTE(review): fragment of the MSI-EQ configure routine (entry/exit
! lines elided).  EQSIZE is built as a (value-1) mask, suggesting the
! queue size is a power of two -- TODO confirm
sethi %hi(FIRE_EQSIZE-1), %g2
or %g2, %lo(FIRE_EQSIZE-1), %g2
* Verify RA range/alignment
RA2PA_RANGE_CONV(%g2, %o2, %g0, herr_noraddr, %g7, %g6)
! Translate EQ number to its per-EQ software state structure
MSIEQNUM2MSIEQ(%g1, %o1, %g4, %g3, %g2)
ldx [%g1 + FIRE_COOKIE_EQSTATE], %g2
ldx [%g1 + FIRE_COOKIE_EQHEAD], %g2
ldx [%g1 + FIRE_COOKIE_EQTAIL], %g3
! Record the guest's EQ base in the fire_msieq structure
stx %g6, [%g4 + FIRE_MSIEQ_GUEST]
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
! NOTE(review): fragment of the MSI-EQ info routine: returns the guest Q
! base converted back from PA to RA -- entry/exit lines elided
MSIEQNUM2MSIEQ(%g1, %o1, %g4, %g3, %g5)
ldx [%g4 + FIRE_MSIEQ_GUEST], %g5
PA2RA_CONV(%g2, %g5, %o1, %g6, %g3) ! PA -> RA (%o1)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* ret1 EQ valid (%o1) (0: Invalid 1: Valid)
ENTRY_NP(fire_msiq_getvalid)
! Load the EQ state register array base from the cookie; the indexing
! and valid-bit test are in lines elided from this chunk.
ldx [%g1 + FIRE_COOKIE_EQSTATE], %g2
SET_SIZE(fire_msiq_getvalid)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg2 EQ valid (%o2) (0: Invalid 1: Valid)
ENTRY_NP(fire_msiq_setvalid)
MSIEQNUM2MSIEQ(%g1, %o1, %g3, %g2, %g4)
ldx [%g3 + FIRE_MSIEQ_GUEST], %g2 ! Guest Q base
! Select the control register by requested state: Control Clear when
! invalidating (%o2 == 0), Control Set when validating (%o2 != 0).
movrz %o2, FIRE_COOKIE_EQCTLCLR, %g3
1: movrnz %o2, FIRE_COOKIE_EQCTLSET, %g3
SET_SIZE(fire_msiq_setvalid)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* ret1 EQ state (%o1) (0: Idle 1: Error)
ENTRY_NP(fire_msiq_getstate)
ldx [%g1 + FIRE_COOKIE_EQSTATE], %g2
! Map a nonzero hardware state indication to HVIO_MSIQSTATE_ERROR; the
! load and masking of the state bits are in elided lines.
movrnz %o1, HVIO_MSIQSTATE_ERROR, %o1
SET_SIZE(fire_msiq_getstate)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg2 EQ state (%o2) (0: Idle 1: Error)
ENTRY_NP(fire_msiq_setstate)
* To change state from error to idle, we set bits 57 and 47 in the
* Event Queue Control Clear Register (CCR)
* To change state from idle to error, we set bits 44 and 57 in the
* Event Queue Control Set Register (CSR)
! Choose CCR (idle) vs CSR (error) by %o2, then build the matching
! write value; the register store is in elided lines.
mov FIRE_COOKIE_EQCTLCLR, %g6 ! EQ CCR
movrnz %o2, FIRE_COOKIE_EQCTLSET, %g6 ! EQ CSR
setx (1 << FIRE_EQCCR_COVERR)|(1 << FIRE_EQCCR_E2I_SHIFT), %g5, %g3 ! set idle
setx (1 << FIRE_EQCSR_ENOVERR)|(1 << FIRE_EQCSR_EN_SHIFT), %g5, %g6 ! set error
SET_SIZE(fire_msiq_setstate)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
ENTRY_NP(fire_msiq_gethead)
ldx [%g1 + FIRE_COOKIE_EQHEAD], %g2
! Convert record index to a byte offset for the caller
sllx %o1, FIRE_EQREC_SHIFT, %o1
SET_SIZE(fire_msiq_gethead)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
ENTRY_NP(fire_msiq_sethead)
ldx [%g1 + FIRE_COOKIE_EQHEAD], %g2
sllx %g3, FIRE_EQREC_SHIFT, %g3
!! %g3 = Prev Head offset
MSIEQNUM2MSIEQ(%g1, %o1, %g4, %g5, %g7)
!! %g3 = Prev Head offset
!! %g4 = struct *fire_msieq
ldx [%g4 + FIRE_MSIEQ_BASE], %g5 /* HW Q base */
ldx [%g4 + FIRE_MSIEQ_EQMASK], %g7
! New head: byte offset -> record index (stored via elided lines)
srlx %o2, FIRE_EQREC_SHIFT, %g3
SET_SIZE(fire_msiq_sethead)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
ENTRY_NP(fire_msiq_gettail)
ldx [%g1 + FIRE_COOKIE_EQTAIL], %g2
! Convert record index to a byte offset for the caller
sllx %o1, FIRE_EQREC_SHIFT, %o1
SET_SIZE(fire_msiq_gettail)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* ret1 MSI status (%o1) (0: Invalid 1: Valid)
ENTRY_NP(fire_msi_getvalid)
ldx [%g1 + FIRE_COOKIE_MSIMAP], %g2
! Default to invalid, then report valid when the mapping register value
! (loaded in elided lines) is negative, i.e. its V bit (bit 63) is set.
mov HVIO_MSI_INVALID, %o1
movrlz %g5, HVIO_MSI_VALID, %o1
SET_SIZE(fire_msi_getvalid)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg2 MSI status (%o2) (0: Invalid 1: Valid)
ENTRY_NP(fire_msi_setvalid)
ldx [%g1 + FIRE_COOKIE_MSIMAP], %g2
! Position the requested valid state at the mapping register's V bit
sllx %o2, FIRE_MSIMR_V_SHIFT, %g3
SET_SIZE(fire_msi_setvalid)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* ret1 MSI state (%o1) (0: Idle 1: Delivered)
ENTRY_NP(fire_msi_getstate)
ldx [%g1 + FIRE_COOKIE_MSIMAP], %g2
mov HVIO_MSI_INVALID, %o1
! Extract the EQWR_N (delivered/pending) bit from the mapping register
! value in %g5 and reduce it to a 0/1 state code
0: srlx %g5, FIRE_MSIMR_EQWR_N_SHIFT, %o1
and %o1, HVIO_MSI_VALID, %o1
SET_SIZE(fire_msi_getstate)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg2 MSI state (%o2) (0: Idle)
ENTRY_NP(fire_msi_setstate)
! Build the EQWR_N write value and load the MSI clear register base;
! the store that marks the MSI idle is in elided lines.
sllx %g5, FIRE_MSIMR_EQWR_N_SHIFT, %g5
ldx [%g1 + FIRE_COOKIE_MSICLR], %g2
SET_SIZE(fire_msi_setstate)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
ENTRY_NP(fire_msi_getmsiq)
ldx [%g1 + FIRE_COOKIE_MSIMAP], %g2
! Return only the EQ-number field of the mapping register
and %o1, FIRE_MSIEQNUM_MASK, %o1
SET_SIZE(fire_msi_getmsiq)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg3 MSI type (%o3) (MSI32=0 MSI64=1)
ENTRY_NP(fire_msi_setmsiq)
ldx [%g1 + FIRE_COOKIE_MSIMAP], %g2
! Clear the old EQ-number field before merging in the new one
andn %g5, FIRE_MSIEQNUM_MASK, %g5
SET_SIZE(fire_msi_setmsiq)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg1 MSI msg type (%o1)
ENTRY_NP(fire_msi_msg_getmsiq)
ldx [%g1 + FIRE_COOKIE_MSGMAP], %g2
! Select the message-mapping register offset for the requested PCIe
! message type (other type cases appear in elided lines)
cmp %o1, PCIE_NONFATAL_MSG
mov FIRE_NONFATAL_OFF, %g3
cmp %o1, PCIE_PME_ACK_MSG
mov FIRE_PME_ACK_OFF, %g3
! Return only the EQ-number field
and %o1, FIRE_MSIEQNUM_MASK, %o1
SET_SIZE(fire_msi_msg_getmsiq)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg1 MSI msg type (%o1)
ENTRY_NP(fire_msi_msg_setmsiq)
ldx [%g1 + FIRE_COOKIE_MSGMAP], %g2
! Select the message-mapping register offset for the requested PCIe
! message type (other type cases appear in elided lines)
cmp %o1, PCIE_NONFATAL_MSG
mov FIRE_NONFATAL_OFF, %g3
cmp %o1, PCIE_PME_ACK_MSG
mov FIRE_PME_ACK_OFF, %g3
! Clear the old EQ-number field before merging in the new one
andn %g4, FIRE_MSIEQNUM_MASK, %g4
SET_SIZE(fire_msi_msg_setmsiq)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg1 MSI msg type (%o1)
* ret1 MSI msg valid state (%o1)
ENTRY_NP(fire_msi_msg_getvalid)
ldx [%g1 + FIRE_COOKIE_MSGMAP], %g2
! Read the mapping register for the requested message type (the cmp /
! branch scaffolding between loads is partly elided)
ldx [%g2 + FIRE_CORR_OFF], %g3
cmp %o1, PCIE_NONFATAL_MSG
ldx [%g2 + FIRE_NONFATAL_OFF], %g3
ldx [%g2 + FIRE_FATAL_OFF], %g3
ldx [%g2 + FIRE_PME_OFF], %g3
cmp %o1, PCIE_PME_ACK_MSG
ldx [%g2 + FIRE_PME_ACK_OFF], %g3
! Valid iff the register value is negative, i.e. bit 63 (V) is set
1: movrlz %g3, HVIO_PCIE_MSG_VALID, %o1
movrgez %g3, HVIO_PCIE_MSG_INVALID, %o1
SET_SIZE(fire_msi_msg_getvalid)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
* arg1 MSI msg type (%o1)
* arg2 MSI msg valid state (%o2)
ENTRY_NP(fire_msi_msg_setvalid)
ldx [%g1 + FIRE_COOKIE_MSGMAP], %g2
! Select the mapping register offset for the message type, then place
! the requested valid state at the register's V bit
cmp %o1, PCIE_NONFATAL_MSG
mov FIRE_NONFATAL_OFF, %g3
cmp %o1, PCIE_PME_ACK_MSG
mov FIRE_PME_ACK_OFF, %g3
sllx %o2, FIRE_MSGMR_V_SHIFT, %g5
SET_SIZE(fire_msi_msg_setvalid)
! fire_msi_mondo_receive - take an MSI event-queue interrupt, copy the
! hardware EQ record into the guest's EQ, translating the Fire record
! layout into the sun4v virtual-PCI record layout word by word.
! NOTE(review): many interleaved lines are elided from this chunk; the
! visible code is left byte-identical.
ENTRY_NP(fire_msi_mondo_receive)
STRAND_PUSH(%g1, %g3, %g4)
STRAND_PUSH(%g2, %g3, %g4)
! Derive the EQ number from the device INO
and %g2, FIRE_DEVINO_MASK, %g2
sub %g2, FIRE_EQ2INO(0), %g2
MSIEQNUM2MSIEQ(%g1, %g2, %g3, %g4, %g5)
!! %g3 = struct *fire_msieq
ldx [%g1 + FIRE_COOKIE_EQTAIL], %g5
ldx [%g1 + FIRE_COOKIE_EQHEAD], %g5
!! %g3 = struct fire_msieq *
sllx %g6, FIRE_EQREC_SHIFT, %g6 /* New tail offset */
sllx %g7, FIRE_EQREC_SHIFT, %g7 /* Old Tail offset */
ldx [%g3 + FIRE_MSIEQ_GUEST], %g2 /* Guest Q base */
ldx [%g3 + FIRE_MSIEQ_BASE], %g5 /* HW Q base */
!! %g3 = struct fire_msieq *
! Word 0 is TTTT EQW0[63:61]
ldx [%g5 + %g6], %g1 ! Read Word 0 From HW
stx %g1, [%g3 + FIRE_MSIEQ_WORD0] ! stash raw EQW0 for later fields
srlx %g1, FIRE_EQREC_TYPE_SHIFT+5, %g4
stx %g4, [%g2 + %g6] ! Store Word 0
ldx [%g5 + %g6], %g4 ! Read Word 1 from HW
stx %g4, [%g3 + FIRE_MSIEQ_WORD1] ! stash raw EQW1 for word 5
stx %g0, [%g2 + %g6] ! Store Word 1 = 0
stx %g0, [%g2 + %g6] ! Store Word 2 = 0
stx %g0, [%g2 + %g6] ! Store Word 3 = 0
! Word 4 is RRRR.RRRR EQW0[31:16]
! Isolate the requester-ID field with a shift-left/shift-right pair
sllx %g1, 64-(MSIEQ_RID_SHIFT+MSIEQ_RID_SIZE_BITS), %g4
srlx %g4, 64-MSIEQ_RID_SHIFT, %g4
stx %g4, [%g2 + %g6] ! Store Word 4
! Word 5 is MSI address EQW1[63:0]
ldx [%g3 + FIRE_MSIEQ_WORD1], %g4
stx %g4, [%g2 + %g6] ! Store Word 5
! Word 6 is MSI Data EQW0[15:0]
sllx %g1, 64-MSIEQ_DATA_SIZE_BITS, %g4
srlx %g4, 64-MSIEQ_DATA_SIZE_BITS, %g4
stx %g4, [%g2 + %g6] ! Store Word 6
! Extract GGGG.GGGG EQW0[31:16] -> W6[47:32]
srlx %g1, MSIEQ_TID_SHIFT, %g4
sllx %g4, 64-MSIEQ_TID_SIZE_BITS, %g4
srlx %g4, 64-(MSIEQ_TID_SIZE_BITS+VPCI_MSIEQ_TID_SHIFT), %g4
ldx [%g3 + FIRE_MSIEQ_WORD0], %g1 ! reload raw EQW0
! Extract CCC field EQW0[58:56] -> W6[18:16]
sllx %g1, 64-(MSIEQ_MSG_RT_CODE_SHIFT+MSIEQ_MSG_RT_CODE_SIZE_BITS), %g1
srlx %g1, 64-MSIEQ_MSG_RT_CODE_SIZE_BITS, %g1
sllx %g1, VPCI_MSIEQ_MSG_RT_CODE_SHIFT, %g1
ldx [%g3 + FIRE_MSIEQ_WORD0], %g1 ! reload raw EQW0 again
! Extract MMMM.MMMM field EQW0[7:0] -> W6[7:0]
sllx %g1, 64-MSIEQ_MSG_CODE_SIZE_BITS, %g1
srlx %g1, 64-MSIEQ_MSG_CODE_SIZE_BITS, %g1
stx %g4, [%g2 + %g6] ! Store Word 6
stx %g0, [%g2 + %g6] ! Store Word 7
! Wrap the tail with the EQ size mask (update in elided lines)
ldx [%g3 + FIRE_MSIEQ_EQMASK], %g4
SET_SIZE(fire_msi_mondo_receive)
! fire_perf_regs_table - per-performance-register descriptor table.
! Each logical register contributes a pair of (offset, mask) entries:
! the first pair line is used for reads, the second for writes (see the
! FIRE_PERF_READ_*/FIRE_PERF_WRITE_* offsets below).
DATA_GLOBAL(fire_perf_regs_table)
.xword FIRE_JBC_PERF_CNTRL, 0x000000000000ffff ! Read Offset & Mask
.xword FIRE_JBC_PERF_CNTRL, 0x000000000000ffff ! Write Offset & Mask
.xword FIRE_JBC_PERF_CNT0, 0xffffffffffffffff
.xword FIRE_JBC_PERF_CNT0, 0xffffffffffffffff
.xword FIRE_JBC_PERF_CNT1, 0xffffffffffffffff
.xword FIRE_JBC_PERF_CNT1, 0xffffffffffffffff
.xword FIRE_DLC_IMU_ICS_IMU_PERF_CNTRL, 0x000000000000ffff
.xword FIRE_DLC_IMU_ICS_IMU_PERF_CNTRL, 0x000000000000ffff
.xword FIRE_DLC_IMU_ICS_IMU_PERF_CNT0, 0xffffffffffffffff
.xword FIRE_DLC_IMU_ICS_IMU_PERF_CNT0, 0xffffffffffffffff
.xword FIRE_DLC_IMU_ICS_IMU_PERF_CNT1, 0xffffffffffffffff
.xword FIRE_DLC_IMU_ICS_IMU_PERF_CNT1, 0xffffffffffffffff
.xword FIRE_DLC_MMU_PRFC, 0x000000000000ffff
.xword FIRE_DLC_MMU_PRFC, 0x000000000000ffff
.xword FIRE_DLC_MMU_PRF0, 0xffffffffffffffff
.xword FIRE_DLC_MMU_PRF0, 0xffffffffffffffff
.xword FIRE_DLC_MMU_PRF1, 0xffffffffffffffff
.xword FIRE_DLC_MMU_PRF1, 0xffffffffffffffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRFC, 0x000000000003ffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRFC, 0x000000000003ffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRF0, 0xffffffffffffffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRF0, 0xffffffffffffffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRF1, 0xffffffffffffffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRF1, 0xffffffffffffffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRF2, 0x00000000ffffffff
.xword FIRE_PLC_TLU_CTB_TLR_TLU_PRF2, 0x00000000ffffffff
! NOTE(review): the LPU link-counter entries below use distinct read and
! write offsets (e.g. *_CNTR1 read vs *_CNTR1_TEST write) rather than
! repeating one offset -- intentional asymmetry, not a copy error
.xword FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LINK_PERF_CNTR1_SEL, 0xffffffff
.xword FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LINK_PERF_CNTR1_SEL, 0xffffffff
.xword FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LINK_PERF_CNTR1, 0xffffffff
.xword FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LINK_PERF_CNTR1_TEST, 0xffffffff
.xword FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LINK_PERF_CNTR2, 0xffffffff
.xword FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LINK_PERF_CNTR2_TEST, 0xffffffff
SET_SIZE(fire_perf_regs_table)
* Each register entry is 0x20 bytes
! Byte offsets of the four fields within one 0x20-byte table entry
#define FIRE_REGID2OFFSET(id, offset) sllx id, 5, offset
#define FIRE_PERF_READ_ADR 0
#define FIRE_PERF_READ_MASK 8
#define FIRE_PERF_WRITE_ADR 0x10
#define FIRE_PERF_WRITE_MASK 0x18
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
ENTRY_NP(fire_get_perf_reg)
ldx [%g1 + FIRE_COOKIE_PERFREGS], %g2
! Registers below the IMU control mask live behind the JBUS base,
! the rest behind the PCIE base -- TODO confirm boundary constant
mov FIRE_COOKIE_JBUS, %g2
cmp %g3, FIRE_DLC_IMU_ICS_IMU_PERF_CNTRL_MASK
movgeu %xcc, FIRE_COOKIE_PCIE, %g2
!! %g1 = Fire cookie pointer
! Locate the descriptor table (relocated by CONFIG_RELOC)
setx fire_perf_regs_table, %g3, %g4
ldx [%g5 + CONFIG_RELOC], %g3
!! %g1 = Fire cookie pointer
!! %g4 = Performance regs table
! Index the table by register id and fetch the read offset and mask
FIRE_REGID2OFFSET(%o1, %g3)
ldx [%g4 + FIRE_PERF_READ_ADR], %g3
ldx [%g4 + FIRE_PERF_READ_MASK], %g4
!! %g1 = Fire cookie pointer
SET_SIZE(fire_get_perf_reg)
* %g1 = Fire Cookie Pointer
* arg0 dev config pa (%o0)
ENTRY_NP(fire_set_perf_reg)
ldx [%g1 + FIRE_COOKIE_PERFREGS], %g2
! Same base selection as fire_get_perf_reg, but using the table's
! write offset/mask pair
mov FIRE_COOKIE_JBUS, %g2
cmp %g3, FIRE_DLC_IMU_ICS_IMU_PERF_CNTRL_MASK
movgeu %xcc, FIRE_COOKIE_PCIE, %g2
!! %g1 = Fire cookie pointer
setx fire_perf_regs_table, %g3, %g4
ldx [%g5 + CONFIG_RELOC], %g3
!! %g1 = Fire cookie pointer
!! %g4 = Performance regs table
FIRE_REGID2OFFSET(%o1, %g3)
ldx [%g4 + FIRE_PERF_WRITE_ADR], %g3
ldx [%g4 + FIRE_PERF_WRITE_MASK], %g4
!! %g1 = Fire cookie pointer
SET_SIZE(fire_set_perf_reg)
* fire_intr_redistribution
* Need to invalidate all of the virtual intrs that are
* mapped to the cpu passed in %g1
* Need to retarget the 3 HW intrs hv controls that are
* mapped to the cpu passed in %g1 to cpu in %g2
ENTRY_NP(fire_intr_redistribution)
CPU_PUSH(%g7, %g3, %g4, %g5)
mov %g1, %g3 ! save cpuid
! Run the per-leaf worker once for each Fire instance; the instance
! selection between the two passes is in elided lines.
DEVINST2INDEX(%g4, %g1, %g1, %g5, .fire_intr_redis_fail)
DEVINST2COOKIE(%g4, %g1, %g1, %g5, .fire_intr_redis_fail)
HVCALL(_fire_intr_redistribution)
DEVINST2INDEX(%g4, %g1, %g1, %g5, .fire_intr_redis_fail)
DEVINST2COOKIE(%g4, %g1, %g1, %g5, .fire_intr_redis_fail)
HVCALL(_fire_intr_redistribution)
mov %g3, %g1 ! restore cpuid
CPU_POP(%g7, %g3, %g4, %g5)
SET_SIZE(fire_intr_redistribution)
* _fire_intr_redistribution
! Per-leaf worker: walk every INO of this Fire instance and invalidate
! mappings that target the departing CPU.  NOTE(review): loop setup and
! some body lines are elided from this chunk.
ENTRY_NP(_fire_intr_redistribution)
CPU_PUSH(%g7, %g4, %g5, %g6)
lduh [%g1 + FIRE_COOKIE_INOMAX], %g2 ! loop counter
be %xcc, .fire_intr_redis_continue ! fire errors handle separate
be %xcc, .fire_intr_redis_continue ! fire errors handle separate
ldx [%g1 + FIRE_COOKIE_INTMAP], %g5
! Extract the target JPID from the mapping register
srlx %g4, JPID_SHIFT, %g7
! compare with this cpu, if match, set to idle
bne,pt %xcc, .fire_intr_redis_continue
! save cpuid since call clobbers it
CPU_PUSH(%g3, %g4, %g5, %g6)
CPU_PUSH(%g2, %g4, %g5, %g6)
mov INTR_DISABLED, %g3 ! Invalid
HVCALL(_fire_intr_setvalid)
CPU_POP(%g2, %g4, %g5, %g6)
CPU_POP(%g3, %g4, %g5, %g6)
.fire_intr_redis_continue:
bgeu,pt %xcc, ._fire_intr_redis_loop
CPU_POP(%g7, %g4, %g5, %g6)
SET_SIZE(_fire_intr_redistribution)
* fire - (preserved) Fire Cookie Pointer
* msieq_id - (preserved) MSI EQ id
* Clears the EQ head/tail registers and forgets the guest Q base.
* NOTE(review): some continuation lines of these macros are elided from
* this chunk; macro bodies are left byte-identical.
#define FIRE_MSIQ_UNCONFIGURE(fire, msieq_id, scr1, scr2, scr3, scr4) \
cmp msieq_id, FIRE_NEQS ;\
MSIEQNUM2MSIEQ(fire, msieq_id, scr3, scr2, scr1) ;\
REGNO2OFFSET(msieq_id, scr4) ;\
ldx [fire + FIRE_COOKIE_EQHEAD], scr1 ;\
ldx [fire + FIRE_COOKIE_EQTAIL], scr2 ;\
stx %g0, [scr1 + scr4] ;\
stx %g0, [scr2 + scr4] ;\
stx %g0, [scr3 + FIRE_MSIEQ_GUEST] ;\
* fire - (preserved) Fire Cookie Pointer
* msieq_id - (preserved) MSI EQ id
* Disables the EQ via its Event Queue Control Clear register.
#define FIRE_MSIQ_INVALIDATE(fire, msieq_id, scr1, scr2, scr3, scr4) \
cmp msieq_id, FIRE_NEQS ;\
ldx [fire + FIRE_COOKIE_EQCTLCLR], scr1 ;\
/* 44=disable, 47=e2i 57=coverr */ ;\
setx (1<<44)|(1<<47)|(1<<57), scr3, scr2 ;\
REGNO2OFFSET(msieq_id, scr4) ;\
stx scr2, [scr1 + scr4] ;\
* FIRE_MSI_INVALIDATE - Invalidate the MSI mappings and then clear
* the MSI status (mark as "idle")
* fire - (preserved) Fire Cookie Pointer
* msi_num - (preserved) MSI number (%o1)
#define FIRE_MSI_INVALIDATE(fire, msi_num, scr1, scr2, scr3) \
cmp msi_num, FIRE_MSI_MASK ;\
REGNO2OFFSET(msi_num, scr2) ;\
ldx [fire + FIRE_COOKIE_MSIMAP], scr1 ;\
ldx [scr1 + scr2], scr3 ;\
/* clear both bits 62 and 63 in the map reg */ ;\
/* valid and ok to write (pending MSI) bit */ ;\
stx scr3, [scr1 + scr2] ;\
mov 1, scr3 /* now mark status as "idle" */ ;\
sllx scr3, FIRE_MSIMR_EQWR_N_SHIFT, scr3 ;\
ldx [fire + FIRE_COOKIE_MSICLR], scr1 ;\
stx scr3, [scr1 + scr2] ;\
* FIRE_MSI_MSG_INVALIDATE
* fire - (preserved) Fire Cookie Pointer
* msg_offset - (preserved) message offset such as FIRE_CORR_OFF,
* FIRE_NONFATAL_OFF, etc. (reg or constant)
#define FIRE_MSI_MSG_INVALIDATE(fire, msg_offset, scr1, scr2) \
ldx [fire + FIRE_COOKIE_MSGMAP], scr1 ;\
ldx [scr1 + msg_offset], scr2 ;\
stx scr2, [scr1 + msg_offset]
* FIRE_INVALIDATE_INTX - clear a pending legacy INTx interrupt via the
* IMU clear register (remainder of macro body elided from this chunk).
#define FIRE_INVALIDATE_INTX(fire, intx_off, scr1, scr2) \
ldx [fire + FIRE_COOKIE_PCIE], scr1 ;\
* %g1 - Fire cookie (preserved)
* %g2 - root complex (0=A, 1=B)
! fire_leaf_soft_reset - quiesce one Fire leaf: flush the IOMMU, clear
! pending INTx, invalidate all MSIs / MSI messages / interrupts, and
! invalidate+unconfigure every MSI EQ.  NOTE(review): loop control and
! several interleaved lines are elided from this chunk.
ENTRY_NP(fire_leaf_soft_reset)
! Put STRAND in protected mode
DISABLE_PCIE_RWUC_ERRORS(%g1, %g4, %g5, %g6)
!! %g2 PCI bus (0=A, 1=B)
! Destroy the iommu mappings
FIRE_IOMMU_FLUSH(%g1, %g2, %g4, %g5, %g6)
! Invalidate any pending legacy (level) interrupts
! that were previously signalled from switches we just reset
FIRE_INVALIDATE_INTX(%g1, FIRE_DLC_IMU_RDS_INTX_INT_A_INT_CLR_REG,
FIRE_INVALIDATE_INTX(%g1, FIRE_DLC_IMU_RDS_INTX_INT_B_INT_CLR_REG,
FIRE_INVALIDATE_INTX(%g1, FIRE_DLC_IMU_RDS_INTX_INT_C_INT_CLR_REG,
FIRE_INVALIDATE_INTX(%g1, FIRE_DLC_IMU_RDS_INTX_INT_D_INT_CLR_REG,
! Invalidate every MSI, counting down from the highest MSI number
set FIRE_MAX_MSIS - 1, %g2
1: FIRE_MSI_INVALIDATE(%g1, %g2, %g6, %g4, %g5)
! invalidate all MSI Messages
FIRE_MSI_MSG_INVALIDATE(%g1, FIRE_CORR_OFF, %g5, %g4)
FIRE_MSI_MSG_INVALIDATE(%g1, FIRE_NONFATAL_OFF, %g5, %g4)
FIRE_MSI_MSG_INVALIDATE(%g1, FIRE_FATAL_OFF, %g5, %g4)
FIRE_MSI_MSG_INVALIDATE(%g1, FIRE_PME_OFF, %g5, %g4)
FIRE_MSI_MSG_INVALIDATE(%g1, FIRE_PME_ACK_OFF, %g5, %g4)
! invalidate all interrupts
! invalidate inos 63 and 62, special case ones not set with
ldx [%g1 + FIRE_COOKIE_VIRTUAL_INTMAP], %g2
add %g2, PCIE_ERR_MONDO_OFFSET, %g2
ldx [%g1 + FIRE_COOKIE_VIRTUAL_INTMAP], %g2
add %g2, PCIE_ERR_MONDO_OFFSET, %g2
ldx [%g1 + FIRE_COOKIE_VIRTUAL_INTMAP], %g2
add %g2, JBC_ERR_MONDO_OFFSET, %g2
CPU_PUSH(%g7, %g4, %g5, %g6) ! _fire_intr_setvalid clobbers all regs
! Don't invalidate inos 62 & 63 in this loop, 62 and 63 are done above
1: HVCALL(_fire_intr_setvalid) ! clobbers %g4-%g6
CPU_POP(%g7, %g4, %g5, %g6) ! restore clobbered value
! invalidate and unconfigure all MSI EQs
1: FIRE_MSIQ_INVALIDATE(%g1, %g2, %g3, %g4, %g5, %g6)
FIRE_MSIQ_UNCONFIGURE(%g1, %g2, %g3, %g4, %g5, %g6)
ENABLE_PCIE_RWUC_ERRORS(%g1, %g2, %g4, %g5)
! Bring STRAND out of protected mode
SET_SIZE(fire_leaf_soft_reset)
* Wrapper around fire_leaf_soft_reset so it can be called from C
* SPARC ABI requires only that g2,g3,g4 are preserved across
* %o1 = root complex (0=A, 1=B), bus number
* void c_fire_leaf_soft_reset(struct fire_cookie *, uint64 root)
ENTRY(c_fire_leaf_soft_reset)
! Preserve the ABI-visible globals, move the C args into the %g
! registers the internal routine expects, then call it.
STRAND_PUSH(%g2, %g6, %g7)
STRAND_PUSH(%g3, %g6, %g7)
STRAND_PUSH(%g4, %g6, %g7)
!! %g2 - root complex (0=A, 1=B)
HVCALL(fire_leaf_soft_reset)
SET_SIZE(c_fire_leaf_soft_reset)
! When DEBUG is defined the hcall.s versions
! of these labels are too far away for a branch, so local copies
! of the error-return stubs are provided here.
herr_nocpu: HCALL_RET(ENOCPU)
herr_nomap: HCALL_RET(ENOMAP)
herr_inval: HCALL_RET(EINVAL)
herr_badalign: HCALL_RET(EBADALIGN)
* This macro brings a given fire leaf's link up
* fire - (preserved) pointer to FIRE_COOKIE
* Bring up a fire link. Returns false on failure.
! NOTE(review): the entry/exit and most control flow of the link-up and
! link-down routines are elided from this chunk; visible lines are left
! byte-identical.
/* get the base addr of RC control regs */
ldx [ %o0 + FIRE_COOKIE_PCIE ], %o1
/* Clear Other Event Status Register LinkDown bit */
setx FIRE_PLC_TLU_CTB_TLR_OE_ERR_RW1C_ALIAS, %o3, %o2
/* The drain bit is cleared via W1C */
setx FIRE_PLC_TLU_CTB_TLR_TLU_STS, %o3, %o2
/* bit 8 of the TLU Control Register is */
/* cleared to initiate link training */
setx FIRE_PLC_TLU_CTB_TLR_TLU_CTL, %o3, %o2
! Allow the link time to train before flushing
CPU_MSEC_DELAY(200, %o1, %o2, %o3)
ldx [ %o0 + FIRE_COOKIE_CFG ], %o1
setx UPST_CFG_BASE, %o3, %o2
lduwa [%o1 + %o2]ASI_P_LE, %o3 /* 16 reads are */
lduwa [%o1 + %o2]ASI_P_LE, %o3 /* needed to flush */
lduwa [%o1 + %o2]ASI_P_LE, %o3 /* the fifo after */
lduwa [%o1 + %o2]ASI_P_LE, %o3 /* toggling the */
lduwa [%o1 + %o2]ASI_P_LE, %o3 /* link. */
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
lduwa [%o1 + %o2]ASI_P_LE, %o3
* This function brings a given fire leaf's link down
* fire - (preserved) pointer to FIRE_COOKIE
/* get the base addr of RC control regs */
ldx [ %o0 + FIRE_COOKIE_PCIE ], %o1
/* And now the actual reset code... */
/* Remain in detect quiesce */
setx FIRE_PLC_TLU_CTB_TLR_TLU_CTL, %o4, %o2
setx FIRE_PLC_TLU_CTB_LPR_PCIE_LPU_LTSSM_CNTL, %o4, %o2
! LTSSM control value -- meaning of the magic bits not derivable from
! this chunk; see the Fire ASIC register reference
setx 0x80000401, %o4, %o3
/* Wait for link to go down */
setx FIRE_PLC_TLU_CTB_TLR_TLU_STS, %o4, %o2
* Check and see if a fire link is up. Returns true on
* success, false on failure.
ENTRY_NP(is_fire_port_link_up)
ldx [ %o0 + FIRE_COOKIE_PCIE ], %o1
! Read the TLU status register; the bit test and return are in lines
! elided from this chunk.
setx FIRE_PLC_TLU_CTB_TLR_TLU_STS, %o3, %o2
SET_SIZE(is_fire_port_link_up)