* ========== Copyright Header Begin ==========================================
* Hypervisor Software File: hcall_ncs.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* - Do not alter or remove copyright notices
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems, Inc. or the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
.ident "@(#)hcall_ncs.s 1.13 07/09/12 SMI"
#include <sys/asm_linkage.h>
#include <sparcv9/misc.h>
#include <devices/pc16550.h>
*-----------------------------------------------------------
* Called via setup_cpu() if the given cpu has access
* to a mau. If the handle is non-NULL then the mau
* struct has already been initialized.
* %g1 - &config.maus[mau-id] or NULL (0) if error.
*-----------------------------------------------------------
! NOTE(review): only a fragment of this routine is visible here; the
! entry label, several branches/delay slots and the exit path are
! missing from this view.  Comments describe visible code only.
VCPU2STRAND_STRUCT(%g1, %g5)	! %g5 = strand struct of vcpu (%g1)
ldub [%g5 + STRAND_ID], %g5	! %g5 = physical strand id
and %g5, NSTRANDS_PER_MAU_MASK, %g5 ! %g5 = hw thread-id
ldx [%g3 + MAU_CPUSET], %g6	! %g6 = current cpuset for this mau
stx %g6, [%g3 + MAU_CPUSET]	! NOTE(review): value stored back unchanged;
				! a "set this strand's bit" step is
				! presumably missing from this view - confirm
add %g5, MAU_CPU_ACTIVE, %g5	! NOTE(review): %g5 overwritten by next load
ldx [%g3 + MAU_CPUSET], %g5
cmp %g4, %g5 ! 1st (only) cpu?
ID2HANDLE(%g6, MAU_HANDLE_SIG, %g6)	! build guest-visible mau handle
stx %g6, [%g3 + MAU_HANDLE]	! publish handle in mau struct
* Now set up interrupt stuff.
ldx [%g1 + CPU_GUEST], %g1	! %g1 = guest struct of this vcpu
LABEL_ADDRESS(mau_intr_getstate, %g4)	! %g4 = getstate callback address
!! %g3 = &config.maus[mau-id]
!! %g4 = mau_intr_getstate
!! %g5 = NULL (no setstate callback)
!! %g7 = return pc (set up in setup_cpu())
* Note that vdev_intr_register() clobbers %g1,%g3,%g5-%g7.
mov %g7, %l3 ! save return pc
HVCALL(vdev_intr_register)
stx %g1, [%g3 + MAU_IHDLR + CI_COOKIE]	! save interrupt cookie returned in %g1
mov MAU_STATE_RUNNING, %g2
stx %g2, [%g3 + MAU_STATE]	! mark this mau as running
mov %l3, %g7 ! restore return pc
mov %g3, %g1 ! return &maus[mau-id]
* Wrapper around setup_mau, so it can be called from C
* SPARC ABI requires only that g2,g3,g4 are preserved across
* %g1 - &config.maus[mau-id] or NULL (0) if error.
* maup = c_setup_mau(vcpup, ino, &config);
! NOTE(review): fragment - the entry label, the call to setup_mau and
! the matching STRAND_POP sequence are not visible in this view.
STRAND_PUSH(%g2, %g6, %g7)	! preserve %g2 per the ABI note above
STRAND_PUSH(%g3, %g6, %g7)	! preserve %g3
STRAND_PUSH(%g4, %g6, %g7)	! preserve %g4
*-----------------------------------------------------------
* Function: stop_crypto()
* This routine needs to execute ON the core
* containing the desired MAU to be stopped. This
* is accomplished by being called during stop_vcpu_cmd.
* We wait for the MAU to stop by doing a sync-load.
* If the MAU is currently busy running a job on behalf
* of the current strand (cpu) being stopped then the
* sync-load will wait for it to complete. If the MAU
* is busy running a job for a different strand (cpu)
* then the sync-load will immediately return. Since
* the job being executed is on behalf of a different
* cpu then the immediate return is okay since we only
* care about the local cpu being stopped.
* Note that we have to enable interrupts while doing
* this load to ensure the MAU can complete the operation
* including possibly handling an interrupt.
* Since we are stopping the current cpu we can be
* assured that any new MAU jobs will not be issued
* on this strand (cpu). Any subsequent MAU jobs will
* be issued from some other strand.
*-----------------------------------------------------------
! NOTE(review): fragment - the entry label, the sync-load sequence
! described above and the return path are not visible in this view.
VCPU2STRAND_STRUCT(%g1, %g5)	! %g5 = strand struct of vcpu (%g1)
ldub [%g5 + STRAND_ID], %g5	! %g5 = physical strand id
and %g5, NSTRANDS_PER_CORE_MASK, %g5 ! %g5 = hw thread-id
ldub [%g3 + MAU_CPU_ACTIVE], %g4	! %g4 = currently recorded active hw tid
stb %g0, [%g3 + MAU_CPU_ACTIVE]	! clear active cpu for this mau
*-----------------------------------------------------------
* Function: start_crypto()
* All we have to do here is set the MAU_CPU_ACTIVE word.
*-----------------------------------------------------------
! NOTE(review): fragment - the entry label and return are not visible
! in this view.
VCPU2STRAND_STRUCT(%g1, %g4)	! %g4 = strand struct of vcpu (%g1)
ldub [%g4 + STRAND_ID], %g4	! %g4 = physical strand id
and %g4, NSTRANDS_PER_CORE_MASK, %g4 ! %g4 = hw thread-id
stb %g4, [%g3 + MAU_CPU_ACTIVE]	! record this hw thread as the active one
*-----------------------------------------------------------
* Called from within trap context.
*-----------------------------------------------------------
! NOTE(review): this is a fragment of the MAU interrupt handler; the
! entry label, several branches/delay slots and the target labels it
! references (.mi_exit_nolock, .mi_chkintr, .mi_qwrap, .mi_genintr,
! .mi_set_state, .mi_addr_err, .mi_chkrv) are not visible in this
! view.  Comments describe only the instructions shown.
brz,pn %g2, .mi_exit_nolock	! no mau struct -> exit without taking lock
MAU_LOCK_ENTER(%g2, %g5, %g3, %g6)	! take per-mau lock
ldx [%g2 + MAU_STATE], %g3
cmp %g3, MAU_STATE_RUNNING	! only service a running mau
VCPU2STRAND_STRUCT(%g1, %g7)
ldub [%g7 + STRAND_ID], %g7	! %g7 = physical strand id
and %g7, NSTRANDS_PER_MAU_MASK, %g7 ! %g7 = hw thread-id
ldub [%g4 + MAU_CPU_ACTIVE], %g4
ldx [%g2 + MAU_STORE_IN_PROGR], %g3
stx %g0, [%g2 + MAU_STORE_IN_PROGR]	! clear store-in-progress flag
ldx [%g2 + MAU_ENABLE_CWQ], %g3	! saved cwq-enable value
mov ASI_SPU_CWQ_CSR_ENABLE, %g4
stxa %g3, [%g4]ASI_STREAM ! re-enable the cwq
ldx [%g2 + MAU_QUEUE + MQ_HEAD], %g3
ldx [%g2 + MAU_QUEUE + MQ_TAIL], %g4
mov %g0, %g6 ! do_intr flag
cmp %g3, %g4 ! queue empty?
be,a,pn %xcc, .mi_chkintr
st %g0, [%g2 + MAU_QUEUE + MQ_BUSY]	! (annulled delay slot) queue idle
ldx [%g3 + NHD_STATE], %g4	! %g4 = state of descriptor at head
* If the descriptor is Pending, then we
* mark it Busy and start the job on the MA.
* There is no interrupt to the guest since
* obviously the job is not complete yet.
cmp %g4, ND_STATE_PENDING
stx %g4, [%g3 + NHD_STATE]	! NOTE(review): stores %g4 back unchanged;
				! the "mark Busy" value load is presumably
				! between these lines in the full source
* Load up the MAU registers and start the job.
* Note that we force the Interrupt bit to be on.
* We can assume given the fact that we arrived in
* this code from an interrupt, so all subsequent
* We are out of registers, so we hide our do_intr flag
* in %g2 which we know is a 8-byte aligned address and
!! %g3 = ncs_hvdesc.nhd_regs
MAU_LOAD1(%g2, %g3, %g7, %g1, 1, .mi_addr_err, .mi_chkrv, %g4, %g5, %g6)
MAU_LOAD(%g3, %g7, %g1, 1, .mi_addr_err, .mi_chkrv, %g4, %g5, %g6)
!! %g1 = return value (errno)
and %g2, 1, %g6 ! get hidden do_intr flag
andn %g2, 1, %g2 ! restore placeholder
brnz,a,pn %g1, .mi_set_state	! error from MAU_LOAD: record it as state
stx %g1, [%g3 + NHD_STATE]	! (annulled delay slot)
ldx [%g3 + NHD_TYPE], %g4
and %g4, ND_TYPE_END, %g5	! %g5 != 0 if last descriptor of a job
add %g3, NCS_HVDESC_SIZE, %g3 ! mq_head++
ldx [%g2 + MAU_QUEUE + MQ_END], %g4
cmp %g3, %g4 ! mq_head == mq_end?
bgeu,a,pn %xcc, .mi_qwrap
ldx [%g2 + MAU_QUEUE + MQ_BASE], %g3 ! mq_head = mq_base
stx %g3, [%g2 + MAU_QUEUE + MQ_HEAD]	! commit advanced head
ldx [%g2 + MAU_QUEUE + MQ_TAIL], %g4
* If previous descriptor was not in error or was the
* last one in a job, then check the next descriptor
* If we reach here then we encountered an
* error on a descriptor within the middle
* of a job. Need to pop the entire job
* off the queue. We stop popping descriptors
* off until we either hit the Last one or
* hit the Tail of the queue.
* Note that we set state in all remaining
* descriptors in job to Error (ND_STATE_ERROR).
cmp %g3, %g4 ! queue empty?
be,a,pn %xcc, .mi_genintr
st %g0, [%g2 + MAU_QUEUE + MQ_BUSY]	! (annulled delay slot) queue idle
* If the descriptor is Busy, then we have
* been interrupted for the completion of
* this particular descriptor. If it is
* the End (last) descriptor in the job or
* the last descriptor in our queue, then we'll
* generate an interrupt to the guest.
MAU_CHECK_ERR(%g1, %g4, %g5)	! %g1 = errno derived from hw error bits
stx %g1, [%g3 + NHD_ERRSTATUS]	! report error status to guest descriptor
* This is the time we would store something
* into maus[].MAU_INTR.CI_DATA if we wanted,
* however it is currently unused.
ldx [%g2 + MAU_IHDLR + CI_COOKIE], %g1	! %g1 = intr cookie from registration
HVCALL(vdev_intr_generate)	! post completion interrupt to the guest
*-----------------------------------------------------------
* Function: mau_intr_getstate()
*-----------------------------------------------------------
! Getstate callback whose address is registered with
! vdev_intr_register() (see LABEL_ADDRESS in the setup code above).
! Reads the queue head and the guest's head marker; the comparison
! and return sequence is not visible in this view.
ENTRY_NP(mau_intr_getstate)
ldx [%g1 + MAU_QUEUE + MQ_HEAD], %g3	! %g3 = current queue head
ldx [%g1 + MAU_QUEUE + MQ_HEAD_MARKER], %g4	! %g4 = guest head marker
SET_SIZE(mau_intr_getstate)
*-----------------------------------------------------------
* Function: hcall_ncs_request(int cmd, uint64_t arg, size_t sz)
* %o5 - hcall function number
* %o1 - Real address of 'arg' data structure
* %o2 - Size of data structure at 'arg'.
* %o0 - EOK (on success),
* EINVAL, ENORADDR, EBADALIGN, EWOULDBLOCK (on failure)
*-----------------------------------------------------------
! NOTE(review): additional sub-command dispatch cases and the default
! (invalid-cmd) path are not visible in this view.
ENTRY_NP(hcall_ncs_request)
btst NCS_PTR_ALIGN - 1, %o1	! arg pointer must be pointer-aligned
bnz,pn %xcc, herr_badalign
* convert %o1 to physaddr for calls below,
RA2PA_RANGE_CONV_UNK_SIZE(%g2, %o1, %o2, herr_noraddr, %g3, %g4)
cmp %o0, NCS_V10_QTAIL_UPDATE	! dispatch on v1.0 sub-command in %o0
be %xcc, ncs_v10_qtail_update
SET_SIZE(hcall_ncs_request)
*-----------------------------------------------------------
* Function: ncs_v10_qtail_update(int unused, ncs_qtail_update_arg_t *arg, size_t sz)
* %o5 - hcall function number
* %o1 - ncs_qtail_update_arg_t *
* %o2 - sizeof (ncs_qtail_update_arg_t)
* %o0 - EOK (on success),
* EINVAL, ENORADDR, EWOULDBLOCK, EIO (on failure)
*-----------------------------------------------------------
! NOTE(review): fragment - branches/delay slots and the labels used
! below (.v1_qtail_loop, .v1_qtail_done, .v1_qtail_done_async,
! .v1_qtail_set_state, .v1_qtail_err, .v1_qtail_addr_err,
! .v1_qtail_chk_rv) are not fully visible in this view.
ENTRY_NP(ncs_v10_qtail_update)
cmp %o2, NCS_QTAIL_UPDATE_ARG_SIZE	! validate caller's arg size
VCPU_GUEST_STRUCT(%g7, %g4)	! %g7 = vcpu, %g4 = guest struct
* Ignore the MID that the guest passes. We use vMID's now
* so whatever it passes is likely wrong, just calculate the MID
VCPU2STRAND_STRUCT(%g7, %g2)
ldub [%g2 + STRAND_ID], %g2	! %g2 = physical strand id
srlx %g2, STRANDID_2_COREID_SHIFT, %g2	! strand id -> core id (MID)
GUEST_MID_GETMAU(%g4, %g2, %o2)	! look up mau struct for that MID
!! %g1 = maus[mid].mau_queue
* Make sure the tail index the caller
* gave us is a valid one for our queue,
* i.e. ASSERT(mq_nentries > nu_tail).
ldx [%g1 + MQ_NENTRIES], %g3
* Error if queue not configured,
!! %g3 = mau.mau_queue.mq_nentries
!! %g2 = ncs_qtail_update_arg.nu_tail
ldx [%o1 + NU_SYNCFLAG], %g6	! %g6 = caller's sync/async flag
mov %g4, %o1 ! %o1 = guest struct
* Turn tail index passed in by caller into
* actual pointer into queue.
sllx %g2, NCS_HVDESC_SHIFT, %g3	! tail index -> byte offset
!! %g3 = &mau_queue.mq_base[nu_tail] (new mq_tail)
* Need hw-thread-id for MA_CTL register.
* Start at mq_head and keep looking for work
* until we run into mq_tail.
VCPU2STRAND_STRUCT(%g7, %g7)
ldub [%g7 + STRAND_ID], %g7 ! %g7 = physical cpuid
and %g7, NSTRANDS_PER_MAU_MASK, %g7 ! phys cpuid -> hw threadid
cmp %g2, %g3 ! mq_head == mq_tail?
be,a,pn %xcc, .v1_qtail_done
* Mark current descriptor busy.
stx %o0, [%g2 + NHD_STATE] ! nhd_state = BUSY
!! %g2 = ncs_hvdesc.nhd_regs
MAU_LOAD(%g2, %g7, %o0, %g6, .v1_qtail_addr_err, .v1_qtail_chk_rv, %o1, %o2, %g4)
* If this was an asynchronous descriptor then
* we're done! Leave MQ_BUSY set.
brnz,pt %g6, .v1_qtail_done_async	! async: intr handler finishes the job
* In Niagara2 the Load value from the Sync
* register simply indicates whether the MAU
* was busy (1 = yes, 0 = no) at the time we
* issued the Load. It does not indicate a
* success or failure of the MAU operation.
* So, we effectively ignore the Load value and
* check for errors in the HWE/INVOP bits in
ldxa [%g4]ASI_STREAM, %g0	! sync-load: wait for MAU, discard value
* Check error bits in Control register.
MAU_CHECK_ERR(%o0, %o1, %g4)	! %o0 = errno from control-reg error bits
* Determine appropriate state to set
brnz,a,pn %o0, .v1_qtail_set_state
stx %o2, [%g2 + NHD_STATE]	! (annulled delay slot)
brnz,a,pn %o0, .v1_qtail_err
add %g2, NCS_HVDESC_SIZE, %g2 ! mq_head++
cmp %g2, %g5 ! mq_head == mq_end?
ba,pt %xcc, .v1_qtail_loop
movgeu %xcc, %g4, %g2 ! mq_head = mq_base
!! %o0 = EWOULDBLOCK, EINVAL, ENORADDR, EIO
SET_SIZE(ncs_v10_qtail_update)
*-----------------------------------------------------------
* Function: ncs_v10_qconf(int unused, ncs_qconf_arg_t *arg, size_t sz)
* %o5 - hcall function number
* %o1 - ncs_qconf_arg_t *
* %o2 - sizeof (ncs_qconf_arg_t)
* %o0 - EOK (on success),
* EBADALIGN, ENORADDR, EINVAL (on failure)
*-----------------------------------------------------------
! NOTE(review): fragment - the entry label, the .v1_qconf_config
! label and several branches/delay slots are not visible in this
! view.  Comments describe visible code only.
cmp %o2, NCS_QCONF_ARG_SIZE	! validate caller's arg size
ldx [%o1 + NQ_MID], %g2 ! %g2 = mid
* Recall that the driver code simply increments
* through all the possible vMIDs when doing a qconf,
* regardless of whether they are actually present
* or not. As a result, it is possible for
* the following macro to return null if the guest
* does not have access to that MAU. This is not a
* critical error since the driver code will never
* attempt to use a non-present mau, however the
* driver code cannot currently handle a "no mau"
* error return from this HV call and since the driver
* code is at present off-limit for repair, we have
* Guests calculate the MAUID based on cpu id, which are
* virtual ids. But firmware uses physical MIDs. So we need
* to translate the guest's vMID to a physical MID.
* Loop through the ROOT MID array and add 1 to the vMID for
* each unconfigured MAU we find.
GUEST_MID_GETMAU(%g1, %g4, %g3)	! %g3 = mau struct (may be null, see above)
add %g3, MAU_QUEUE, %g1 ! %g1 = &maus[mid].mau_queue
brnz,a,pt %g2, .v1_qconf_config	! nonzero -> configure path
* Caller wishes to unconfigure the mau_queue entry
brnz,pn %g4, herr_wouldblock	! active queue cannot be unconfigured
stx %g0, [%g1 + MQ_NENTRIES]	! nentries = 0 marks queue unconfigured
btst NCS_PTR_ALIGN - 1, %g5	! base address must be pointer-aligned
bnz,pn %xcc, herr_badalign
sub %g3, %g2, %g5 ! %g5 = queue size (end-base)
* %g2 (RA(nq_base) -> PA(nq_base))
RA2PA_RANGE_CONV_UNK_SIZE(%g4, %g2, %g5, herr_noraddr, %g6, %g7)
* %g3 (RA(nq_end) -> PA(nq_end))
RA2PA_RANGE_CONV(%g4, %g3, 8, herr_noraddr, %g6, %g7)
* Verify that the queue size is what
* we would expect, i.e. (nq_nentries << NCS_HVDESC_SHIFT)
ldx [%o1 + NQ_NENTRIES], %g6
sllx %g6, NCS_HVDESC_SHIFT, %g7	! expected byte size from nentries
* Head and Tail initially point to Base.
stx %g6, [%g1 + MQ_NENTRIES]	! commit nentries -> queue configured
*-----------------------------------------------------------
* Function: ncs_qconf(uint64_t qtype, uint64_t baseaddr, uint64_t nentries)
* %o1 - base real address of queue or queue handle if
* %o2 - number of entries in queue
* %o0 - EOK (on success),
* EINVAL, ENOACCESS, EBADALIGN,
* %o1 - queue handle for respective queue.
*-----------------------------------------------------------
! Top-level qconf hcall: dispatches CWQ-type queues to ncs_qconf_cwq;
! the MAU fall-through path is not visible in this view.
ENTRY_NP(hcall_ncs_qconf)
VCPU_GUEST_STRUCT(%g2, %g1)	! %g2 = vcpu, %g1 = guest struct
IS_NCS_QTYPE_CWQ(%o0, NCS_QTYPE_CWQ, ncs_qconf_cwq)	! CWQ type -> cwq variant
SET_SIZE(hcall_ncs_qconf)
*-----------------------------------------------------------
* Function: ncs_qconf_mau
* %o1 - base real address of queue or queue handle if
* %o2 - number of entries in queue.
* %o0 - EOK (on success),
* EINVAL, ENOACCESS, EBADALIGN,
* ENORADDR, EWOULDBLOCK (on failure)
* %o1 - queue handle for respective queue.
*-----------------------------------------------------------
! NOTE(review): fragment - the entry label, the power-of-two check
! feeding %g3, the .m_qconf_* labels and several delay-slot
! instructions are not visible in this view.
VCPU_GUEST_STRUCT(%g2, %g1)	! %g2 = vcpu, %g1 = guest struct
brz,pn %o2, .m_qconf_unconfig	! nentries == 0 -> unconfigure request
cmp %o2, NCS_MIN_MAU_NENTRIES	! enforce minimum queue size
* Check that #entries is a power of two.
brz,pn %g3, herr_noaccess	! no mau accessible -> ENOACCESS
* The cpu that does the queue configure will also
* be the one targeted for all the interrupts for
* this mau. We need to effectively single thread
* the interrupts per-mau because the interrupt handler
* updates global per-mau data structures.
VCPU2STRAND_STRUCT(%g2, %g7)
ldub [%g7 + STRAND_ID], %o0	! %o0 = physical cpu id (interrupt target)
* Make sure base address is size aligned.
sllx %o2, NCS_HVDESC_SHIFT, %g4	! %g4 = queue size in bytes
bnz,pn %xcc, herr_badalign
MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)	! take per-mau lock
* Translate base address from real to physical.
RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, .m_qconf_noraddr, %g6, %g2)
stx %o0, [%g3 + MAU_QUEUE + MQ_CPU_PID]	! record interrupt-target cpu
stx %o1, [%g3 + MAU_QUEUE + MQ_BASE_RA]	! guest's real address of base
stx %g2, [%g3 + MAU_QUEUE + MQ_BASE]	! physical base address
stx %g2, [%g3 + MAU_QUEUE + MQ_HEAD]	! head = base
stx %g2, [%g3 + MAU_QUEUE + MQ_TAIL]	! tail = base (queue empty)
stx %g2, [%g3 + MAU_QUEUE + MQ_END]	! NOTE(review): end stored as base;
				! the add of the queue size to form the
				! true end is presumably missing from
				! this view - confirm against full source
stx %o2, [%g3 + MAU_QUEUE + MQ_NENTRIES]
st %g0, [%g3 + MAU_QUEUE + MQ_BUSY]	! queue not busy yet
stx %g0, [%g3 + MAU_QUEUE + MQ_HEAD_MARKER]
mov NCS_QSTATE_CONFIGURED, %g1
st %g1, [%g3 + MAU_QUEUE + MQ_STATE]	! mark queue configured
ldx [%g3 + MAU_HANDLE], %o1	! return queue handle in %o1
! --- unconfigure path (presumably .m_qconf_unconfig) ---
MAU_HANDLE2ID_VERIFY(%o1, herr_inval, %g2)	! %g2 = mau id from handle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
brz,pn %g3, herr_noaccess	! guest has no access to this mau
MAU_LOCK_ENTER(%g3, %g5, %g1, %g6)	! take per-mau lock
ld [%g3 + MAU_QUEUE + MQ_BUSY], %g4
brnz,pn %g4, .m_qconf_wouldblock	! cannot unconfigure an active queue
mov NCS_QSTATE_UNCONFIGURED, %g1	! (delay slot as shown)
st %g1, [%g3 + MAU_QUEUE + MQ_STATE]	! mark queue unconfigured
*-----------------------------------------------------------
* Function: ncs_qinfo(uint64_t qhandle)
* %o0 - EOK (on success),
* %o2 - queue base real address
* %o3 - number of queue entries
*-----------------------------------------------------------
! NOTE(review): the guest-struct setup feeding %g1, the lock exit and
! the return sequence are not visible in this view.
ENTRY_NP(hcall_ncs_qinfo)
MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau id from qhandle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
MAU_LOCK_ENTER(%g3, %g2, %g5, %g6)	! take per-mau lock
ldx [%g3 + MAU_QUEUE + MQ_BASE_RA], %o2	! %o2 = queue base real address
ldx [%g3 + MAU_QUEUE + MQ_NENTRIES], %o3	! %o3 = number of entries
SET_SIZE(hcall_ncs_qinfo)
*-----------------------------------------------------------
* Function: ncs_gethead(uint64_t qhandle)
* %o0 - EOK (on success),
* %o1 - queue head offset
*-----------------------------------------------------------
! NOTE(review): the subtract producing the head offset (head - base)
! and the lock-exit/return sequence are not visible in this view.
ENTRY_NP(hcall_ncs_gethead)
VCPU_GUEST_STRUCT(%g7, %g1)	! %g7 = vcpu, %g1 = guest struct
MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau id from qhandle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)	! take per-mau lock
ldx [%g3 + MAU_QUEUE + MQ_BASE], %g1	! %g1 = queue base
ldx [%g3 + MAU_QUEUE + MQ_HEAD], %g2	! %g2 = queue head
SET_SIZE(hcall_ncs_gethead)
*-----------------------------------------------------------
* Function: ncs_gettail(uint64_t qhandle)
* %o0 - EOK (on success),
* %o1 - queue tail offset
*-----------------------------------------------------------
! NOTE(review): the subtract producing the tail offset (tail - base)
! and the lock-exit/return sequence are not visible in this view.
ENTRY_NP(hcall_ncs_gettail)
VCPU_GUEST_STRUCT(%g7, %g1)	! %g7 = vcpu, %g1 = guest struct
MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau id from qhandle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)	! take per-mau lock
ldx [%g3 + MAU_QUEUE + MQ_BASE], %g1	! %g1 = queue base
ldx [%g3 + MAU_QUEUE + MQ_TAIL], %g2	! %g2 = queue tail
SET_SIZE(hcall_ncs_gettail)
*-----------------------------------------------------------
* Function: ncs_qhandle_to_devino(uint64_t qhandle)
* %o0 - EOK (on success),
*-----------------------------------------------------------
! NOTE(review): the MAU-path devino load and return sequence are not
! visible in this view.
ENTRY_NP(hcall_ncs_qhandle_to_devino)
MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau id from qhandle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
HCALL_NCS_QHANDLE_TO_DEVINO_CWQ()	! handle CWQ-type handles
SET_SIZE(hcall_ncs_qhandle_to_devino)
*-----------------------------------------------------------
* Function: ncs_sethead_marker(uint64_t qhandle, uint64_t new_headoffset)
* %o0 - EOK (on success),
* EINVAL, ENORADDR (on failure)
*-----------------------------------------------------------
! NOTE(review): fragment - the range check of the new offset against
! base/end and the return sequence are not visible in this view.
ENTRY_NP(hcall_ncs_sethead_marker)
MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau id from qhandle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
btst NCS_HVDESC_SIZE - 1, %o1	! offset must be descriptor-size aligned
bnz,a,pn %xcc, herr_inval
MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)	! take per-mau lock
ldx [%g3 + MAU_QUEUE + MQ_BASE], %g1	! %g1 = queue base
ldx [%g3 + MAU_QUEUE + MQ_END], %g2	! %g2 = queue end
stx %g1, [%g3 + MAU_QUEUE + MQ_HEAD_MARKER]	! NOTE(review): stores base;
				! the add of the caller's offset to form
				! the marker is presumably missing from
				! this view - confirm against full source
HCALL_NCS_SETHEAD_MARKER_CWQ()	! handle CWQ-type handles
SET_SIZE(hcall_ncs_sethead_marker)
*-----------------------------------------------------------
* Function: ncs_settail(uint64_t qhandle, uint64_t new_tailoffset)
* %o0 - EOK (on success),
* EINVAL, ENORADDR (on failure)
*-----------------------------------------------------------
! Top-level settail hcall: dispatches CWQ handles to ncs_settail_cwq;
! the MAU fall-through (to ncs_settail_mau below) is not visible here.
ENTRY_NP(hcall_ncs_settail)
VCPU_GUEST_STRUCT(%g7, %g1)	! %g7 = vcpu, %g1 = guest struct
HANDLE_IS_CWQ_BRANCH(%o0, %g2, ncs_settail_cwq)	! CWQ handle -> cwq variant
SET_SIZE(hcall_ncs_settail)
*-----------------------------------------------------------
* Function: ncs_settail_mau(uint64_t qhandle, uint64_t new_tailoffset)
* %o0 - EOK (on success),
* EINVAL, ENORADDR (on failure)
*-----------------------------------------------------------
! NOTE(review): fragment - several branches/delay slots and the labels
! referenced below (.st_mau_dowork, .st_mau_loop, .st_mau_set_state,
! .st_mau_addr_err, .st_mau_chk_rv) are not fully visible in this
! view.  Comments describe only the instructions shown.
ENTRY_NP(ncs_settail_mau)
MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau id from qhandle
GUEST_MID_GETMAU(%g1, %g2, %g3)	! %g3 = mau struct
* Verify that we're on the MAU that the
btst NCS_HVDESC_SIZE - 1, %o1	! new tail must be descriptor-size aligned
bnz,a,pn %xcc, herr_inval
ldx [%g3 + MAU_QUEUE + MQ_BASE], %g1	! %g1 = queue base
ldx [%g3 + MAU_QUEUE + MQ_END], %g2	! %g2 = queue end
MAU_LOCK_ENTER(%g3, %g5, %g4, %g6)	! take per-mau lock
* Update MQ_BUSY to indicate we're going to have work
* pending. If the current MQ_BUSY is non-zero then
* that indicates that queue has jobs and is being
* managed asynchronously (via mau_intr).
ld [%g3 + MAU_QUEUE + MQ_BUSY], %g2	! %g2 = previous busy state
st %g4, [%g3 + MAU_QUEUE + MQ_BUSY]	! mark queue busy
brz,pt %g2, .st_mau_dowork	! was idle -> process the work inline
ldx [%g3 + MAU_QUEUE + MQ_HEAD], %g2	! (delay slot as shown) %g2 = head
* Queue already busy indicating queue is being
* actively managed by interrupt handler. So,
* all we have to do is insert job at tail and
ldx [%g3 + MAU_QUEUE + MQ_TAIL], %g4
stx %g1, [%g3 + MAU_QUEUE + MQ_TAIL]	! publish new tail
stx %g1, [%g3 + MAU_QUEUE + MQ_TAIL]	! NOTE(review): duplicate store of
				! the same value - likely two distinct
				! code paths collapsed in this view;
				! harmless as shown but confirm
* Use the per-cwq assigned cpu as target
* for interrupts for this job.
ldx [%g3 + MAU_QUEUE + MQ_CPU_PID], %g7	! cpu recorded at qconf time
and %g7, NSTRANDS_PER_MAU_MASK, %g7 ! pid -> hw tid
cmp %g2, %g1 ! mq_head == mq_tail?
* Mark current descriptor busy.
stx %o0, [%g2 + NHD_STATE] ! nhd_state = BUSY
!! %g2 = ncs_hvdesc.nhd_regs
MAU_LOAD(%g2, %g7, %o0, 1, .st_mau_addr_err, .st_mau_chk_rv, %o1, %o5, %g4)
* We're done. The rest will be handled by MAU
* interrupt handler. Leave MQ_BUSY set.
* Determine appropriate state to set descriptor to.
movrnz %o0, ND_STATE_ERROR, %o5	! errno != 0 -> descriptor state = ERROR
stx %o5, [%g2 + NHD_STATE]	! record final descriptor state
ldx [%g2 + NHD_TYPE], %o1
and %o1, ND_TYPE_END, %o1	! %o1 != 0 if last descriptor of a job
add %g2, NCS_HVDESC_SIZE, %g2 ! mq_head++
ldx [%g3 + MAU_QUEUE + MQ_END], %g5
ldx [%g3 + MAU_QUEUE + MQ_BASE], %g4
cmp %g2, %g5 ! mq_head == mq_end?
movgeu %xcc, %g4, %g2 ! mq_head = mq_base
stx %g2, [%g3 + MAU_QUEUE + MQ_HEAD]	! commit advanced head
* If previous descriptor was not in error or was the
* last one in a job, then check the next descriptor
brnz,pn %o1, .st_mau_loop ! last descriptor?
bne,pn %xcc, .st_mau_loop	! (delay-slot position as shown)
* If we reach here then we encountered an
* error on a descriptor within the middle
* of a job. Need to pop the entire job
* off the queue. We stop popping descriptors
* off until we either hit the Last one or
* hit the Tail of the queue.
* Note that we set state in all remaining
* descriptors in job to Error (ND_STATE_ERROR).
cmp %g2, %g1 ! queue empty?
bne,pt %xcc, .st_mau_set_state	! more descriptors left in this job
st %g0, [%g3 + MAU_QUEUE + MQ_BUSY]	! (delay slot as shown) queue idle
SET_SIZE(ncs_settail_mau)