* ========== Copyright Header Begin ==========================================
* Hypervisor Software File: ldc.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* - Do not alter or remove copyright notices
* - Redistribution and use of this software in source and binary forms, with
* or without modification, are permitted provided that the following
* - Redistribution of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistribution in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of Sun Microsystems, Inc. or the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
* You acknowledge that this software is not designed, licensed or
* intended for use in the design, construction, operation or maintenance of
* ========== Copyright Header End ============================================
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
.ident "@(#)ldc.s 1.14 07/07/17 SMI"
#include <sys/asm_linkage.h>
* Common block of code executed by all LDC package API
* calls to verify input value and fetch the pointer to
* ch_id (unmodified) - channel ID of LDC
* scr (modified) - scratch register
* guest (modified) - returns current guest pointer
* endpoint (modified) - returns the ldc_endpoint pointer
! GET_LDC_ENDPOINT: validate a guest-supplied channel ID and compute the
! pointer to its ldc_endpoint structure inside the guest struct.  Branches
! to herr_invalchan if the ID is out of range, the channel is not live, or
! the channel is marked private (guest API calls may not touch private
! channels).
! NOTE(review): bgeu consumes condition codes, but the cmp that sets them
! is not visible in this view -- confirm against the original source.
#define GET_LDC_ENDPOINT(ch_id, scr, guest, endpoint) \
set GUEST_LDC_MAX_CHANNEL_IDX, endpoint ;\
ldx [guest + endpoint], endpoint ;\
bgeu,pn %xcc, herr_invalchan /* is channel legit? */ ;\
mulx ch_id, LDC_ENDPOINT_SIZE, endpoint ;\
set GUEST_LDC_ENDPOINT, scr ;\
add endpoint, scr, endpoint ;\
add endpoint, guest, endpoint ;\
ldub [endpoint + LDC_IS_LIVE], scr ;\
brz,pn scr, herr_invalchan /* is channel live */ ;\
ldub [endpoint + LDC_IS_PRIVATE], scr ;\
brnz,pn scr, herr_invalchan /* is channel private? */ ;\
* Macro to calculate channel status for the target in a
* guest<->guest LDC link.
* guest_endpt (unmodified) - guest endpoint struct pointer
* status (return value) - Output: status of channel
* scr (modified) - scratch register
! GET_GUEST_QUEUE_STATUS: compute the link status (DOWN/UP/RESET) of a
! guest<->guest channel by examining the peer endpoint RX queue state.
! Status is DOWN if the peer RX queue is unconfigured (qsize==0), RESET if
! the peer flagged LDC_RX_UPDATED (the flag is consumed/cleared here as a
! side effect), otherwise UP.
! NOTE(review): the local labels 0: and 1: targeted below are not visible
! in this view -- confirm against the original source.
#define GET_GUEST_QUEUE_STATUS(guest_endpt, status, scr) \
ldx [guest_endpt + LDC_TARGET_GUEST], status ;\
ldx [guest_endpt + LDC_TARGET_CHANNEL], scr ;\
mulx scr, LDC_ENDPOINT_SIZE, scr ;\
add status, scr, status ;\
set GUEST_LDC_ENDPOINT, scr ;\
add scr, status, status /* target endpoint */ ;\
ldub [status + LDC_RX_UPDATED], scr ;\
brnz,a scr, 1f /* if flag is set... */ ;\
stb %g0, [status + LDC_RX_UPDATED] /* then clear it */ ;\
ldx [status + LDC_RX_QSIZE], status /* if qsize==0 then */ ;\
brz,a status, 0f /* status is DOWN */ ;\
mov LDC_CHANNEL_DOWN, status ;\
brz,a scr, 0f /* if qsize!=0 && updated=0 */ ;\
mov LDC_CHANNEL_UP, status /* then UP */ ;\
mov LDC_CHANNEL_RESET, status /* else RESET */ ;\
* Macro to calculate channel status for the target in a
* guest_endpt (unmodified) - guest endpoint struct pointer
* status (return value) - Output: status of channel
* scr (modified) - scratch register
! GET_SP_QUEUE_STATUS: compute the link status (DOWN/UP/RESET) of a
! guest<->SP channel from the SRAM queue descriptor shared with the SP.
! The SRAM "state updated" flag is consumed/cleared here as a side effect.
! NOTE(review): the first ldx reads through status before any visible
! initialization of status (presumably the config pointer) -- a setup line
! appears to be missing from this view; confirm against the original.
#define GET_SP_QUEUE_STATUS(guest_endpt, status, scr) \
ldx [status + CONFIG_SP_LDCS], status ;\
ldx [guest_endpt + LDC_TARGET_CHANNEL], scr ;\
mulx scr, SP_LDC_ENDPOINT_SIZE, scr ;\
add status, scr, status /* target endpoint */ ;\
ldx [status + SP_LDC_TX_QD_PA], status /* QD ptr to SRAM */ ;\
ldub [status + SRAM_LDC_STATE_UPDATED], scr ;\
brnz,a scr, 1f /* if flag set, then */ ;\
stb %g0, [status + SRAM_LDC_STATE_UPDATED] /* clear it */ ;\
ldub [status + SRAM_LDC_STATE], status /* status */ ;\
brz,a status, 0f /* DOWN = 0, UP = 1 */ ;\
mov LDC_CHANNEL_DOWN, status ;\
brz,a scr, 0f /* if status=UP && updated=0 */ ;\
mov LDC_CHANNEL_UP, status /* then UP */ ;\
mov LDC_CHANNEL_RESET, status /* else RESET */ ;\
* hv_ldc_chk_pkts - Check channel for pending pkts
* Check if the specified endpoint has any pkts available
* in either its Rx queue or peer's Tx queue. If pending,
* deliver a mondo to the CPU associated with this endpt.
* Used by ldc_vintr_setstate and ldc_vintr_setvalid to
* notify the guest when interrupts are enabled.
* %g1 endpoint being checked (modified)
* %g2 - %g6 scratch (modified)
! hv_ldc_chk_pkts: if the endpoint RX queue is non-empty, or the peer
! (guest or SP) TX queue has pending data, deliver a mondo to the CPU
! bound to this endpoint.  %g1 = endpoint; %g2-%g6 are scratch.
! NOTE(review): several local labels referenced below (.notify_guest,
! .peer_is_guest, .no_notification) and some delay-slot/cmp instructions
! are not visible in this view -- this listing appears to have dropped
! lines; confirm against the original source before modifying.
ENTRY_NP(hv_ldc_chk_pkts)
lduw [%g1 + LDC_RX_QHEAD], %g2
lduw [%g1 + LDC_RX_QTAIL], %g3
cmp %g2, %g3 ! if queue empty (head==tail)
bne %xcc, .notify_guest ! check the transmit side
ldub [%g1 + LDC_TARGET_TYPE], %g2
cmp %g2, LDC_GUEST_ENDPOINT
be,pt %xcc, .peer_is_guest
bne,pt %xcc, .no_notification
! Target is a SP endpoint
! Read and compare the SRAM head and tail
ldx [%g2 + CONFIG_SP_LDCS], %g2 ! get SP endpoint array
ldx [%g1 + LDC_TARGET_CHANNEL], %g3 ! and target endpoint
mulx %g3, SP_LDC_ENDPOINT_SIZE, %g3
add %g2, %g3, %g2 ! and its struct
! quick check to see whether there are any packets
! to grab on this channel.
ldx [%g2 + SP_LDC_RX_QD_PA], %g2
ldub [%g2 + SRAM_LDC_HEAD], %g3
ldub [%g2 + SRAM_LDC_TAIL], %g4
bne,pn %xcc, .notify_guest
! Target is a guest endpoint
ldx [%g1 + LDC_TARGET_GUEST], %g2 ! find target guest
set GUEST_LDC_ENDPOINT, %g3
ldx [%g1 + LDC_TARGET_CHANNEL], %g3 ! and its endpoint
mulx %g3, LDC_ENDPOINT_SIZE, %g3
add %g2, %g3, %g2 ! target endpt struct
lduw [%g2 + LDC_TX_QHEAD], %g3 ! check if src has
lduw [%g2 + LDC_TX_QTAIL], %g4 ! anything pending for
beq,pt %xcc, .no_notification
!! %g1 endpoint to deliver interrupt
add %g1, LDC_RX_MAPREG + LDC_MAPREG_STATE, %g4
casa [%g4]ASI_P, %g6, %g5 ! atomically claim the mapreg state
bne,a,pn %xcc, .no_notification
ldx [%g1 + LDC_RX_MAPREG + LDC_MAPREG_COOKIE], %g3
ldx [%g1 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g1
brz,pn %g1, .no_notification ! no CPU bound to this endpoint
ba send_dev_mondo ! tail call, returns to caller
SET_SIZE(hv_ldc_chk_pkts)
* arg1 q base raddr (%o1) - must be aligned to size of queue
* arg2 size (#entries) (%o2) - must be power of 2 (or 0 to unconfigure queue)
* Configure transmit queue for LDC endpoint.
! hcall_ldc_tx_qconf: configure (or unconfigure, when %o2==0) the transmit
! queue of the LDC endpoint given by %o0.  %o1 = queue base RA, %o2 =
! number of entries (power of 2).
! NOTE(review): local labels 1:/2:, some delay slots and the HCALL_RET
! are not visible in this view -- confirm against the original source.
ENTRY_NP(hcall_ldc_tx_qconf)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
brz,pn %o2, 2f ! size of 0 unconfigures queue
! number of entries must be a power of 2
sllx %o2, Q_EL_SIZE_SHIFT, %g4 ! convert #entries to bytes
! queue raddr must be aligned to size of queue
bnz,pn %xcc, herr_badalign ! base addr not aligned ?
RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, herr_noraddr, %g6, %g5)
! Note: The guest can flush a TX queue by (re)configuring it.
! If this happens, we still want to make sure that the
! head/tail pointer consistency is maintained between the two
! guests and so we mark the queue empty without moving the
! tail pointer. Note that the tail pointer is set to zero at
! We do, however, need to make sure the new tail value is not
! larger than the size of the queue in case the guest is switching
stx %o1, [%g2 + LDC_TX_QBASE_RA]
stx %g5, [%g2 + LDC_TX_QBASE_PA]
lduw [%g2 + LDC_TX_QTAIL], %g6 ! read existing tail
sub %g5, %g6, %g5 ! bigger than qsize?
movrlz %g5, %g0, %g6 ! if so, we have to zero
brlz,a %g5, 1f ! the head and tail pointer
stw %g6, [%g2 + LDC_TX_QTAIL]
stw %g6, [%g2 + LDC_TX_QHEAD]
! set queue size last if queue is being configured
stx %g4, [%g2 + LDC_TX_QSIZE]
! All we need to do is set the qsize to zero if the queue is
! Note: we specifically do NOT clear the LDC_TX_QBASE_PA field
! because doing so could introduce a security hole.
stx %g0, [%g2 + LDC_TX_QSIZE]
SET_SIZE(hcall_ldc_tx_qconf)
* ret1 q base raddr (%o1)
* ret2 size (#entries) (%o2)
* Return information about the LDC endpoint's transmit queue.
! hcall_ldc_tx_qinfo: return the TX queue base RA (%o1) and size in
! entries (%o2) for the endpoint given by %o0.
! NOTE(review): the HCALL_RET path is not visible in this view.
ENTRY_NP(hcall_ldc_tx_qinfo)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
ldx [%g2 + LDC_TX_QBASE_RA], %o1
ldx [%g2 + LDC_TX_QSIZE], %g4
srlx %g4, Q_EL_SIZE_SHIFT, %o2 ! convert bytes back to #entries
SET_SIZE(hcall_ldc_tx_qinfo)
* ret3 channel state (%o3)
* Return information about the current state of the queue.
! hcall_ldc_tx_get_state: return TX head (%o1), tail (%o2) and channel
! state (%o3) for the endpoint given by %o0.  Channel state is computed
! per target type: HV targets are assumed up; guest and SP targets use
! the GET_*_QUEUE_STATUS macros.
! NOTE(review): local labels 2:/3: and the return path are not visible
! in this view -- confirm against the original source.
ENTRY_NP(hcall_ldc_tx_get_state)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
ldx [%g2 + LDC_TX_QSIZE], %g3 ! no Q configured ?
lduw [%g2 + LDC_TX_QHEAD], %o1
lduw [%g2 + LDC_TX_QTAIL], %o2
ldub [%g2 + LDC_TARGET_TYPE], %g1 ! is this endpoint connected to
cmp %g1, LDC_HV_ENDPOINT ! a hypervisor endpoint?
be,a %xcc, 3f ! if so, assume channel is up
cmp %g1, LDC_GUEST_ENDPOINT ! is this endpoint connected to
be %xcc, 2f ! another guest endpoint?
! must be a guest<->sp connection
GET_SP_QUEUE_STATUS(%g2, %o3, %g1)
GET_GUEST_QUEUE_STATUS(%g2, %o3, %g1)
SET_SIZE(hcall_ldc_tx_get_state)
* guest_to_guest_tx_set_tail
* %g2 - sender's endpoint
* Increments the guest TX tail pointer and sends notification to the RX
* Note: It is important that the caller has already verified that the
* new tail value is valid given the current state of the queue.
! guest_to_guest_tx_set_tail: commit new TX tail (%g1) on endpoint %g2,
! then, if the peer guest RX queue is configured and empty, notify the
! peer (vdev interrupt cookie if set, else CPU mondo).
! NOTE(review): some delay slots and the .tx_set_tail_done label are not
! visible in this view -- confirm against the original source.
ENTRY_NP(guest_to_guest_tx_set_tail)
stw %g1, [%g2 + LDC_TX_QTAIL]
ldx [%g2 + LDC_TARGET_GUEST], %g5 ! find target guest
ldx [%g2 + LDC_TARGET_CHANNEL], %g3 ! and its endpoint
mulx %g3, LDC_ENDPOINT_SIZE, %g4
set GUEST_LDC_ENDPOINT, %g6
ldx [%g4 + LDC_RX_QSIZE], %g6 ! no Q configured ?
brz,pn %g6, .tx_set_tail_done ! no notification
! Just leave the data in our transmit queue for now. The recipient
! will be responsible for pulling over the data into its receive
! queue when the guest on that end makes a call to check its
! receive queue head/tail pointers.
lduw [%g4 + LDC_RX_QHEAD], %g6
lduw [%g4 + LDC_RX_QTAIL], %g3
cmp %g6, %g3 ! only send an interrupt
bne %xcc, .tx_set_tail_done ! if the RX queue is empty.
! now see if we need to send an interrupt to the recipient
ldx [%g4 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g5
! if no target CPU specified, is there a vdev interrupt we
ldx [%g4 + LDC_RX_VINTR_COOKIE], %g5
brz,pn %g5, .tx_set_tail_done ! if not, we are done.
STRAND_PUSH(%g7, %g5, %g6) ! save return %pc across call
HVCALL(vdev_intr_generate)
!! %g4 recipient's endpoint
STRAND_PUSH(%g7, %g5, %g6) ! save return %pc across call
HVCALL(hv_ldc_cpu_notify)
SET_SIZE(guest_to_guest_tx_set_tail)
* Used by the guest to send data packets down the channel.
! hcall_ldc_tx_set_qtail: guest hypercall to advance the TX tail of
! endpoint %o0 to offset %o1.  Validates alignment and that the new tail
! is consistent with the current head/tail, then dispatches per target
! type (guest, HV callback loop, or SP).
! NOTE(review): several local labels (1:, .guest_target, .hv_target,
! .ldc_tx_set_qtail_hv, .ldc_tx_set_qtail_done) and delay slots are not
! visible in this view -- confirm against the original source.
ENTRY_NP(hcall_ldc_tx_set_qtail)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
! new tail offset must be aligned properly
andcc %o1, Q_EL_SIZE-1, %g0
bnz,pn %xcc, herr_badalign
! Transmit queue configured?
ldx [%g2 + LDC_TX_QSIZE], %g3
! new tail offset must be within range
bleu,pn %xcc, herr_inval ! offset bigger than Q or less than 0?
! verify new tail value makes sense with respect to the old head/tail
lduw [%g2 + LDC_TX_QHEAD], %g3
lduw [%g2 + LDC_TX_QTAIL], %g4
ldx [%g2 + LDC_TX_QBASE_PA], %g7 ! save this off for now
! tail >= head i.e queue data not yet wrapped or queue empty
! verify ((new_tail > tail) || (new_tail < head))
ba herr_inval ! invalid tail value
! tail < head i.e. queue data currently wraps around end of queue
! verify ((new_tail > tail) && (new_tail < head))
2: ! input values verified
! Check to see if the target is a
! Guest domain or endpoint in HV or SP
ldub [%g2 + LDC_TARGET_TYPE], %g4
cmp %g4, LDC_GUEST_ENDPOINT
be,pt %xcc, .guest_target ! guest <-> guest connection
be,pt %xcc, .hv_target ! guest <-> hypervisor connection
! guest <-> SP connection
HVCALL(guest_to_sp_tx_set_tail) ! clobbers all %g1,%g3-%g7
ba .ldc_tx_set_qtail_done
! guest <-> hypervisor connection
! update tail pointer and invoke callback to process data
stw %o1, [%g2 + LDC_TX_QTAIL]
ldx [%g4 + CONFIG_HV_LDCS], %g4 ! get HV endpoint array
ldx [%g2 + LDC_TARGET_CHANNEL], %g1 ! and target endpoint
mulx %g1, LDC_ENDPOINT_SIZE, %g5
add %g4, %g5, %g5 ! and its struct
ldx [%g5 + LDC_RX_CB], %g6 ! get the callback
brz,pn %g6, .ldc_tx_set_qtail_done ! if none, drop pkt
ldx [%g5 + LDC_RX_CBARG], %g1 ! load the argument
add %g3, %g7, %g7 ! PA of the payload
ldx [%g2 + LDC_TX_QSIZE], %g5 ! each time we invoke the
dec Q_EL_SIZE, %g5 ! callback, it will consume one
add %g3, Q_EL_SIZE, %g3 ! element from the Q so we
and %g3, %g5, %g5 ! update the head pointer by
stw %g5, [%g2 + LDC_TX_QHEAD] ! one and store the new value.
jmp %g6 ! invoke callback
! Guest may have incremented the tail pointer by more than one
! element and so now we must check to see whether the queue is
! empty. If not, we will have to invoke the callback again.
mulx %o0, LDC_ENDPOINT_SIZE, %g2
set GUEST_LDC_ENDPOINT, %g3
lduw [%g2 + LDC_TX_QHEAD], %g3
lduw [%g2 + LDC_TX_QTAIL], %g4
ldx [%g2 + LDC_TX_QBASE_PA], %g7 ! save this off for now
cmp %g3, %g4 ! Is Q empty now?
bne,pn %xcc, .ldc_tx_set_qtail_hv
! If Q is empty, we are done.
ba .ldc_tx_set_qtail_done
! guest <-> guest connection
HVCALL(guest_to_guest_tx_set_tail) ! clobbers all %g1,%g3-%g7
SET_SIZE(hcall_ldc_tx_set_qtail)
* arg1 q base raddr (%o1) - must be aligned to size of queue
* arg2 size (#entries) (%o2) - must be power of 2 (or 0 to unconfigure queue)
* Configure receive queue for LDC endpoint.
! hcall_ldc_rx_qconf: configure (or unconfigure, when %o2==0) the receive
! queue of the LDC endpoint given by %o0.  %o1 = queue base RA, %o2 =
! number of entries (power of 2).  For SP targets the SRAM link status is
! updated via sp_ldc_update_link_status; for guest targets the peer may
! be notified of the reconfiguration.
! NOTE(review): local labels 1:-5:, some cmp/delay-slot instructions and
! the HCALL_RET are not visible in this view -- confirm against the
! original source.
ENTRY_NP(hcall_ldc_rx_qconf)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2) ! %g1 guest, %g2 endpoint
brz,pn %o2, 2f ! size of 0 unconfigures queue
! number of entries must be a power of 2
sllx %o2, Q_EL_SIZE_SHIFT, %g4 ! convert #entries to bytes
! queue raddr must be aligned to size of queue
bnz,pn %xcc, herr_badalign ! base addr not aligned ?
RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, herr_noraddr, %g6, %g5)
! Note: The guest can flush a RX queue by (re)configuring it.
! If this happens, we still want to make sure that the
! head/tail pointer consistency is maintained between the two
! guests and so we mark the queue empty without moving the
! tail pointer. Note that the tail pointer is set to zero at
! We do, however, need to make sure the new tail value is not
! larger than the size of the queue in case the guest is switching
stx %o1, [%g2 + LDC_RX_QBASE_RA]
stx %g5, [%g2 + LDC_RX_QBASE_PA]
lduw [%g2 + LDC_RX_QTAIL], %g6 ! read existing tail
sub %g5, %g6, %g5 ! bigger than qsize?
movrlz %g5, %g0, %g6 ! if so, we have to zero
brlz,a %g5, 1f ! the head and tail pointer
stw %g6, [%g2 + LDC_RX_QTAIL]
stw %g6, [%g2 + LDC_RX_QHEAD]
ldub [%g2 + LDC_TARGET_TYPE], %g3
!! %o1 - arg1 q base raddr
!! %o2 - arg2 size (#entries)
!! %g4 - new RX_QSIZE value if arg2 != 0
be,pn %xcc, sp_ldc_update_link_status ! returns directly to guest
be,pn %xcc, herr_inval ! should not be using SRAM LDC
brz,pn %o2, 3f ! new qsize of 0 unconfigures queue
ldx [%g2 + LDC_RX_QSIZE], %g6 ! read existing size
stb %g5, [%g2 + LDC_RX_UPDATED] ! if size not zero, set updated
stx %g4, [%g2 + LDC_RX_QSIZE] ! set last if Q being configured
! All we need to do is set the qsize to zero if the queue is
! Note: we specifically do NOT clear the LDC_RX_QBASE_PA field
! because doing so could introduce a security hole.
ldx [%g2 + LDC_RX_QSIZE], %g6 ! read existing size
stx %g0, [%g2 + LDC_RX_QSIZE]
brz,pn %g6, 5f ! if existing size=0, return
stb %g5, [%g2 + LDC_RX_UPDATED] ! else set updated, & notify
ldub [%g2 + LDC_TARGET_TYPE], %g3
cmp %g3, LDC_GUEST_ENDPOINT
ldx [%g2 + LDC_TARGET_GUEST], %g4
ldx [%g2 + LDC_TARGET_CHANNEL], %g3
mulx %g3, LDC_ENDPOINT_SIZE, %g3
set GUEST_LDC_ENDPOINT, %g6
add %g6, %g3, %g3 ! Target endpt struct
ldx [%g3 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g4
!! %g2 - this endpoint struct
!! %g3 - target endpoint struct
! Notify the other end that this endpoint's
! Rx queue was reconfigured
HVCALL(hv_ldc_cpu_notify)
SET_SIZE(hcall_ldc_rx_qconf)
* ret1 q base raddr (%o1)
* ret2 size (#entries) (%o2)
* Return information about the LDC endpoint's receive queue.
! hcall_ldc_rx_qinfo: return the RX queue base RA (%o1) and size in
! entries (%o2) for the endpoint given by %o0.
! NOTE(review): the HCALL_RET path is not visible in this view.
ENTRY_NP(hcall_ldc_rx_qinfo)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2) ! %g1 guest, %g2 endpoint
ldx [%g2 + LDC_RX_QBASE_RA], %o1
ldx [%g2 + LDC_RX_QSIZE], %g4
srlx %g4, Q_EL_SIZE_SHIFT, %o2 ! convert bytes back to #entries
SET_SIZE(hcall_ldc_rx_qinfo)
* guest_to_guest_pull_data
* %g2 - receiver's endpoint (preserved)
* Pulls queue data (if available) from the target endpoint's TX queue
* into this specified endpoint's RX queue.
! guest_to_guest_pull_data: copy pending packets from the peer guest TX
! queue into this endpoint (%g2) RX queue, bounded by LDC_MAX_PKT_COPY
! packets per call so a guest cannot pin the CPU in the hypervisor.
! Updates both head/tail pointers and may raise a "TX queue no longer
! full" vdev interrupt on the sender side.
! NOTE(review): numerous local labels (1:, .done_copying_data) and the
! matching STRAND_POP/return sequence are not visible in this view --
! lines appear to have been dropped; confirm against the original source.
ENTRY_NP(guest_to_guest_pull_data)
! We will need to clobber some additional registers so save them
STRAND_PUSH(%g7, %g3, %g4)
STRAND_PUSH(%o1, %g3, %g4)
STRAND_PUSH(%o2, %g3, %g4)
STRAND_PUSH(%o3, %g3, %g4)
STRAND_PUSH(%g2, %g3, %g4)
ldx [%g2 + LDC_TARGET_GUEST], %g1 ! find sender's guest
ldx [%g2 + LDC_TARGET_CHANNEL], %g3 ! and its endpoint
mulx %g3, LDC_ENDPOINT_SIZE, %g4
set GUEST_LDC_ENDPOINT, %g6
ldx [%g1 + LDC_TX_QSIZE], %g6 ! no TX Q configured ? Then
brz,pn %g6, .done_copying_data ! there is no data to pull over
! limit each call to copying a certain number of packets so as to
! not keep the CPU stuck in the hypervisor for too long.
set (LDC_MAX_PKT_COPY * Q_EL_SIZE), %g7
! make sure we are not trying to send more packets than
brlez %g7, .done_copying_data
lduw [%g1 + LDC_TX_QHEAD], %g3
lduw [%g1 + LDC_TX_QTAIL], %g4
sub %g4, %g3, %g4 ! check (tail - head) value.
brz %g4, .done_copying_data ! if zero, nothing to copy
nop ! since TX Q is empty.
brgz %g4, 1f ! If non-negative, then that's
nop ! how many bytes we need to
ldx [%g1 + LDC_TX_QSIZE], %g5 ! Else, we need to copy
sub %g5, %g3, %g4 ! (size - head) bytes from TX Q
!! %g3 sender's head pointer
!! %g4 bytes of data to copy
lduw [%g2 + LDC_RX_QHEAD], %o1
lduw [%g2 + LDC_RX_QTAIL], %g6
brgez %g5, 1f ! If non-negative, then that's
nop ! how many bytes we are able
ldx [%g2 + LDC_RX_QSIZE], %g5 ! our current RX Q size
brnz %o1, 1f ! but we can't fill our Q
nop ! completely so we must
sub %g5, Q_EL_SIZE, %g5 ! subtract if head is zero.
brz %g5, .done_copying_data ! if zero, nothing to copy
nop ! since our RX Q is full.
!! %g3 sender's head pointer
!! %g4 bytes of data to copy (sender)
!! %g5 bytes of data to copy (receiver)
! find the lesser of the two copy size values
! make sure we don't copy more packets than allowed per hcall
brgz,a %g5, 1f ! if we haven't yet sent the max allowed pkts,
mov %g5, %g7 ! then simply update our counter and continue.
! trying to copy more packets than (or exactly as many packets as)
mov %g7, %g4 ! limit the number of bytes about to be copied
clr %g7 ! update our counter
mov %g3, %o2 ! save off the original tx head
mov %g6, %o3 ! and rx tail values.
ldx [%g1 + LDC_TX_QBASE_PA], %o1
ldx [%g2 + LDC_RX_QBASE_PA], %o1
sub %g4, 8, %o1 ! use as loop index
ldx [%g3], %g5 ! read data from TX Q head
stx %g5, [%g6] ! write data to RX Q tail
add %g3, 8, %g3 ! increment head pointer
brgez,pt %o1, 1b ! loop until done.
add %g6, 8, %g6 ! increment tail pointer
! Now we need to update our head and tail pointers
ldx [%g2 + LDC_RX_QSIZE], %g5
stw %g5, [%g2 + LDC_RX_QTAIL]
ldx [%g1 + LDC_TX_QSIZE], %g5
stw %g5, [%g1 + LDC_TX_QHEAD]
!! %g4 bytes of data that were copied
! We might need to send a 'queue no longer full' interrupt
ldub [%g1 + LDC_TXQ_FULL], %g3
stb %g0, [%g1 + LDC_TXQ_FULL]
ldx [%g1 + LDC_RX_VINTR_COOKIE], %g1
HVCALL(vdev_intr_generate)
! Restore registers we were not supposed to clobber.
SET_SIZE(guest_to_guest_pull_data)
* ret3 channel state (%o3)
* Return information about the current state of the queue.
! hcall_ldc_rx_get_state: return RX head (%o1), tail (%o2) and channel
! state (%o3) for the endpoint given by %o0.  First pulls any pending
! data from the peer (guest or SP) TX queue into our RX queue so the
! reported head/tail reflect all available data.
! NOTE(review): the branch/dispatch labels between the two pull paths are
! not visible in this view -- confirm against the original source.
ENTRY_NP(hcall_ldc_rx_get_state)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2) ! %g1 guest, %g2 endpoint
ldx [%g2 + LDC_RX_QSIZE], %g3 ! no Q configured ?
! At this point (if the other end is a guest or SP) we want to go
! and check the other transmit queue and see if there is any data
! to pull into our receive queue.
ldub [%g2 + LDC_TARGET_TYPE], %g3
!! %g2 guest endpoint (preserved)
HVCALL(guest_to_guest_pull_data) ! clobbers all %g1,%g3-%g7
GET_GUEST_QUEUE_STATUS(%g2, %o3, %g1)
!! %g2 guest endpoint (preserved)
HVCALL(sp_to_guest_pull_data) ! clobbers all %g1,%g3-%g7
GET_SP_QUEUE_STATUS(%g2, %o3, %g4)
lduw [%g2 + LDC_RX_QHEAD], %o1
lduw [%g2 + LDC_RX_QTAIL], %o2
SET_SIZE(hcall_ldc_rx_get_state)
* Used by the guest to indicate that it has received the packet(s).
! hcall_ldc_rx_set_qhead: guest hypercall to advance the RX head of
! endpoint %o0 to offset %o1, acknowledging consumed packets.  Validates
! alignment, range, and consistency with the current head/tail before
! committing the new head.
! NOTE(review): some cmp/branch lines for the wrap/no-wrap validation and
! the HCALL_RET are not visible in this view -- confirm against the
! original source.
ENTRY_NP(hcall_ldc_rx_set_qhead)
! verifies channel ID, returns pointers to guest and ldc_endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
! new head offset must be aligned properly
andcc %o1, Q_EL_SIZE-1, %g0
bnz,pn %xcc, herr_badalign
! Receive queue configured?
ldx [%g2 + LDC_RX_QSIZE], %g3
! new head offset must be within range
bleu,pn %xcc, herr_inval ! offset bigger than Q or less than 0?
! verify new head value makes sense with respect to the old head/tail
lduw [%g2 + LDC_RX_QHEAD], %g3
lduw [%g2 + LDC_RX_QTAIL], %g4
! head > tail i.e queue data currently wraps around end of queue
! verify ((new_head > head) || (new_head <= tail))
ba herr_inval ! invalid head value
1: ! tail >= head i.e. queue data not yet wrapped or queue empty
! verify ((new_head > head) && (new_head <= tail))
2: ! input values verified
stw %o1, [%g2 + LDC_RX_QHEAD]
SET_SIZE(hcall_ldc_rx_set_qhead)
* sp_ldc_update_link_status
* arg1 q base raddr (%o1)
* arg2 size (#entries) (%o2)
* Additionally, we are passed the following arguments:
* %g4 new RX_QSIZE value if arg2 != 0
* Called by the rx_qconf API routine for guest<->sp connections. The guest's
* head/tail pointers have already been updated at this point. The only
* things left to do here is:
* - update the rx_qsize field for this endpoint based on %o2/%g4
* - update the SRAM link status fields and possibly send interrupt to SP
* - return directly to guest
* N.B. It is important that we return directly to the guest from this
* routine (using HCALL_RET or the like). %g7 does not contain a return
* value to which we can branch.
! sp_ldc_update_link_status: for a guest<->SP channel, commit the new
! rx_qsize (%g4 if %o2 != 0, else 0), mirror the link state (UP/DOWN) and
! "updated"/"notify" flags into the shared SRAM queue descriptor, and
! send a state-change interrupt to the SP.  Returns directly to the guest
! (no usable return address in %g7 -- see header comment above).
! NOTE(review): local labels 1:-5: and the final return sequence are not
! visible in this view -- confirm against the original source.
ENTRY_NP(sp_ldc_update_link_status)
!! %g4 new rx_qsize value
ldx [%g1 + CONFIG_SP_LDCS], %g1
ldx [%g2 + LDC_TARGET_CHANNEL], %g3
mulx %g3, SP_LDC_ENDPOINT_SIZE, %g3
add %g1, %g3, %g1 ! target endpoint
ldx [%g1 + SP_LDC_RX_QD_PA], %g1 ! QD ptr to SRAM
!! %g4 new rx_qsize value
brz,pn %o2, 2f ! was guest trying to un-configure the queue?
ldx [%g2 + LDC_RX_QSIZE], %g6 ! reflects our link status
brnz,a %g6, 1f ! if status is not DOWN
stb %g3, [%g1 + SRAM_LDC_STATE_UPDATED] ! then set "updated" flag
stx %g4, [%g2 + LDC_RX_QSIZE] ! Store the new qsize value
stb %g3, [%g1 + SRAM_LDC_STATE] ! link status = UP
ba 3f ! send notification to SP
! All we need to do is set the qsize to zero if the queue is
! Note: we specifically do NOT clear the LDC_RX_QBASE_PA field
! because doing so could introduce a security hole.
ldx [%g2 + LDC_RX_QSIZE], %g3 ! read existing size
stb %g0, [%g1 + SRAM_LDC_STATE] ! link status = DOWN
stx %g0, [%g2 + LDC_RX_QSIZE]
brz,pn %g3, 5f ! if existing size=0, return
nop ! without sending interrupt
mov 1, %g3 ! set the "updated" flag
stb %g3, [%g1 + SRAM_LDC_STATE_UPDATED]
stb %g3, [%g1 + SRAM_LDC_STATE_NOTIFY]
! Send notification interrupt to the SP
! %g2 target endpoint (clobbered)
LDC_SEND_SP_INTR(%g2, %g3, %g4, SP_LDC_STATE_CHG)
SET_SIZE(sp_ldc_update_link_status)
* guest_to_sp_tx_set_tail
* Increments the guest TX tail pointer and sends notification to the
* Note: It is important that the caller has already verified that the
* new tail value is valid given the current state of the queue.
! guest_to_sp_tx_set_tail: commit new TX tail (%g1) on guest endpoint
! %g2, then, under the SP endpoint TX spinlock, push as much data as
! possible into the SRAM queue via sram_ldc_push_data and (if it asks)
! send a data interrupt to the SP.
! NOTE(review): the SPINLOCK_EXIT/STRAND_POP(%g7)/return lines and label
! 1: are not visible in this view -- confirm against the original source.
ENTRY_NP(guest_to_sp_tx_set_tail)
ldx [%g3 + CONFIG_SP_LDCS], %g3 ! get SP endpoint array
ldx [%g2 + LDC_TARGET_CHANNEL], %g4 ! and target endpoint
mulx %g4, SP_LDC_ENDPOINT_SIZE, %g5
add %g3, %g5, %g3 ! and its struct
stw %g1, [%g2 + LDC_TX_QTAIL] ! update the tail
STRAND_PUSH(%g2, %g4, %g5) ! save off pointer
add %g3, SP_LDC_TX_LOCK, %g5
SPINLOCK_ENTER(%g5, %g6, %g4) ! serialize SRAM TX access
STRAND_PUSH(%g7, %g4, %g5)
HVCALL(sram_ldc_push_data) !! %g3 (sp endpoint) preserved
add %g3, SP_LDC_TX_LOCK, %g5
!! %g2 send interrupt flag
brz %g2, 1f ! skip notification if flag is clear
! %g3 target endpoint (clobbered)
LDC_SEND_SP_INTR(%g3, %g1, %g4, SP_LDC_DATA)
STRAND_POP(%g2, %g4) ! restore pointer
SET_SIZE(guest_to_sp_tx_set_tail)
* %g2 guest (receiver) endpoint (preserved)
* Pulls queue data (if available) from the target SP endpoint's TX queue
* into the specified guest endpoint's RX queue.
! sp_to_guest_pull_data: copy pending packets from the SP SRAM TX queue
! into guest endpoint (%g2) RX queue.  Queue state is snapshotted into a
! per-endpoint scratch area under the RX spinlock so a guest reconfiguring
! its queue mid-copy cannot corrupt HV state.  Sends the SP a "space
! available" ACK interrupt when data was pulled.
! NOTE(review): loop labels, SPINLOCK_EXIT and the STRAND_POP/return
! sequence are not visible in this view -- lines appear to have been
! dropped; confirm against the original source.
ENTRY_NP(sp_to_guest_pull_data)
! save off our return %pc value
STRAND_PUSH(%g7, %g3, %g4)
ldx [%g1 + CONFIG_SP_LDCS], %g1 ! get SP endpoint array
ldx [%g2 + LDC_TARGET_CHANNEL], %g4 ! and target endpoint
mulx %g4, SP_LDC_ENDPOINT_SIZE, %g5
add %g1, %g5, %g1 ! and its struct
! quick check to see whether there are any packets
! to grab on this channel.
ldx [%g1 + SP_LDC_RX_QD_PA], %g3
ldub [%g3 + SRAM_LDC_HEAD], %g4
ldub [%g3 + SRAM_LDC_TAIL], %g5
clr %g6 ! "send ACK" flag
add %g1, SP_LDC_RX_LOCK, %g3 ! PA of endpoint lock
SPINLOCK_ENTER(%g3, %g4, %g5)
! snapshot queue state into our scratch register area
! since we will be copying the data in possibly several
! passes and we want to ensure the guest cannot cause any
! HV corruption by reconfiguring the queue while we are
! executing this routine.
ldx [%g1 + SP_LDC_RX_QD_PA], %g5
ldub [%g5 + SRAM_LDC_HEAD], %g3
LDC_SRAM_IDX_TO_OFFSET(%g3)
stw %g3, [%g1 + SP_LDC_RX_SCR_TXHEAD] ! TX head
ldub [%g5 + SRAM_LDC_TAIL], %g3
LDC_SRAM_IDX_TO_OFFSET(%g3)
stw %g3, [%g1 + SP_LDC_RX_SCR_TXTAIL] ! TX tail
set (SRAM_LDC_QENTRY_SIZE * SRAM_LDC_ENTRIES_PER_QUEUE), %g3
stx %g3, [ %g1 + SP_LDC_RX_SCR_TXSIZE ] ! TX size
stx %g5, [ %g1 + SP_LDC_RX_SCR_TX_QDPA ] ! TX queue data PA
ldx [ %g1 + SP_LDC_RX_Q_DATA_PA ], %g5
stx %g5, [ %g1 + SP_LDC_RX_SCR_TX_QPA ] ! TX queue base PA
stx %g2, [%g1 + SP_LDC_RX_SCR_TARGET]
lduw [%g2 + LDC_RX_QTAIL], %g4
stw %g4, [%g1 + SP_LDC_RX_SCR_RXTAIL] ! RX tail
lduw [%g2 + LDC_RX_QHEAD], %g4
stw %g4, [%g1 + SP_LDC_RX_SCR_RXHEAD] ! RX head
ldx [%g2 + LDC_RX_QSIZE], %g4
stx %g4, [%g1 + SP_LDC_RX_SCR_RXSIZE] ! RX size
ldx [%g2 + LDC_RX_QBASE_PA], %g4
stx %g4, [%g1 + SP_LDC_RX_SCR_RX_QPA] ! RX queue base PA
lduw [%g1 + SP_LDC_RX_SCR_TXHEAD], %g2
lduw [%g1 + SP_LDC_RX_SCR_TXTAIL], %g3
ldx [%g1 + SP_LDC_RX_SCR_TXSIZE], %g4
LDC_QUEUE_DATA_AVAILABLE(%g2, %g3, %g4)
LDC_SRAM_OFFSET_TO_IDX(%g3)
!! %g3 packets of data to copy
lduw [%g1 + SP_LDC_RX_SCR_RXHEAD], %g4
lduw [%g1 + SP_LDC_RX_SCR_RXTAIL], %g5
ldx [%g1 + SP_LDC_RX_SCR_RXSIZE], %g7
LDC_QUEUE_SPACE_AVAILABLE(%g4, %g5, %g7, Q_EL_SIZE)
!! %g3 packets of data to copy
!! %g4 packets of available space
! find the lesser of the two copy size values
! must have at least one LDC packet to copy, otherwise we are done.
brlez %g3, .done_copy_from_sram
mov 1, %g6 ! "send ACK" flag
!! %g3 packets of data to copy
ldx [%g1 + SP_LDC_RX_SCR_TX_QPA], %g4
add %g2, %g4, %g2 ! PA of TX queue data
ldx [%g1 + SP_LDC_RX_SCR_RX_QPA], %g4
add %g5, %g4, %g5 ! PA of RX queue tail
!! %g3 packets of data to copy
sub %g3, 1, %g3 ! use as loop index
LDC_COPY_PKT_FROM_SRAM(%g2, %g5, %g4, %g7)
! Now we need to update our scratchpad head and tail pointers
ldx [%g1 + SP_LDC_RX_SCR_TX_QPA], %g7
sub %g2, %g7, %g2 ! New TX head offset
ldx [%g1 + SP_LDC_RX_SCR_TXSIZE], %g7
move %xcc, 0, %g2 ! check for wrap around
stw %g2, [%g1 + SP_LDC_RX_SCR_TXHEAD]
ldx [%g1 + SP_LDC_RX_SCR_RX_QPA], %g7
sub %g5, %g7, %g5 ! New RX tail offset
ldx [%g1 + SP_LDC_RX_SCR_RXSIZE], %g7
move %xcc, 0, %g5 ! check for wrap around
stw %g5, [%g1 + SP_LDC_RX_SCR_RXTAIL]
ldx [ %g1 + SP_LDC_RX_SCR_TX_QDPA ], %g4 ! queue data PA
ldx [ %g1 + SP_LDC_RX_SCR_TX_QPA ], %g4 ! queue base PA
lduw [ %g1 + SP_LDC_RX_SCR_TXHEAD ], %g3
LDC_SRAM_OFFSET_TO_IDX(%g3)
stb %g3, [%g4 + SRAM_LDC_HEAD] ! commit the new TX head
ldx [%g1 + SP_LDC_RX_SCR_TARGET], %g2
lduw [%g1 + SP_LDC_RX_SCR_RXTAIL], %g5
stw %g5, [%g2 + LDC_RX_QTAIL] ! commit the new RX tail
add %g1, SP_LDC_RX_LOCK, %g5 ! PA of endpoint lock
! Send the SP an ACK if we have pulled data from the SRAM
! %g6 target endpoint (clobbered)
LDC_SEND_SP_INTR(%g6, %g3, %g4, SP_LDC_SPACE)
!! %g1 sender's (sp) endpoint
! restore our return %pc value
SET_SIZE(sp_to_guest_pull_data)
* Routine to send as much data as possible from a guest's TX queue
* into the corresponding SRAM RX queue.
* NOTE: caller must own the SP endpoint TX lock before calling this
* %g2 guest endpoint (modified)
* %g3 sp endpoint (unmodified)
* %g7 return %pc value (unmodified)
* %g2 '1' if interrupt notification is required, 0 otherwise.
* Clobbers all globals except %g3 and %g7
! sram_ldc_push_data: push as many packets as fit from the guest TX queue
! (%g2) into the SRAM RX queue of SP endpoint %g3.  Caller must hold the
! SP TX lock.  Queue state is snapshotted into the per-endpoint scratch
! area first so the guest cannot corrupt HV state mid-copy.  Returns %g2
! = 1 if the caller should interrupt the SP, 0 otherwise.
! NOTE(review): loop labels and some min()/branch lines are not visible
! in this view -- confirm against the original source.
ENTRY_NP(sram_ldc_push_data)
lduw [%g2 + LDC_TX_QTAIL], %g4
stw %g4, [%g3 + SP_LDC_TX_SCR_TXTAIL] ! TX tail
lduw [%g2 + LDC_TX_QHEAD], %g4
stw %g4, [%g3 + SP_LDC_TX_SCR_TXHEAD] ! TX head
ldx [%g2 + LDC_TX_QSIZE], %g4
stx %g4, [%g3 + SP_LDC_TX_SCR_TXSIZE] ! TX size
ldx [%g2 + LDC_TX_QBASE_PA], %g4
stx %g4, [%g3 + SP_LDC_TX_SCR_TX_QPA] ! TX q base PA
ldx [%g3 + SP_LDC_TX_QD_PA], %g4
ldub [%g4 + SRAM_LDC_HEAD], %g5
LDC_SRAM_IDX_TO_OFFSET(%g5)
stw %g5, [%g3 + SP_LDC_TX_SCR_RXHEAD] ! RX head
ldub [%g4 + SRAM_LDC_TAIL], %g5
LDC_SRAM_IDX_TO_OFFSET(%g5)
stw %g5, [%g3 + SP_LDC_TX_SCR_RXTAIL] ! RX tail
set (SRAM_LDC_QENTRY_SIZE*SRAM_LDC_ENTRIES_PER_QUEUE), %g5
stx %g5, [ %g3 + SP_LDC_TX_SCR_RXSIZE ] ! RX size
stx %g4, [ %g3 + SP_LDC_TX_SCR_RX_QDPA ] ! RX qd data PA
ldx [ %g3 + SP_LDC_TX_Q_DATA_PA ], %g4
stx %g4, [ %g3 + SP_LDC_TX_SCR_RX_QPA ] ! RX q base PA
stx %g2, [ %g3 + SP_LDC_TX_SCR_TARGET ]
lduw [%g3 + SP_LDC_TX_SCR_TXHEAD], %g4
lduw [%g3 + SP_LDC_TX_SCR_TXTAIL], %g6
ldx [%g3 + SP_LDC_TX_SCR_TXSIZE], %g2
LDC_QUEUE_DATA_AVAILABLE(%g4, %g6, %g2)
!! %g6 packets of data to copy
lduw [%g3 + SP_LDC_TX_SCR_RXHEAD], %g5
lduw [%g3 + SP_LDC_TX_SCR_RXTAIL], %g1
ldx [%g3 + SP_LDC_TX_SCR_RXSIZE], %g2
LDC_QUEUE_SPACE_AVAILABLE(%g5, %g1, %g2, SRAM_LDC_QENTRY_SIZE)
LDC_SRAM_OFFSET_TO_IDX(%g5)
!! %g5 packets of space available
!! %g6 packets of data to copy
! find the lesser of the two copy size values
! must have at least one LDC packet to copy,
ldx [%g3 + SP_LDC_TX_SCR_TX_QPA], %g5
add %g4, %g5, %g4 ! PA of TX queue data
ldx [%g3 + SP_LDC_TX_SCR_RX_QPA], %g5
add %g1, %g5, %g1 ! PA of RX queue tail
!! %g6 packets of data to copy
sub %g6, 1, %g6 ! use as loop index
LDC_COPY_PKT_TO_SRAM(%g4, %g1, %g5, %g2) ! moves pointers
! Now we need to update our scratchpad head/tail pointers
ldx [%g3 + SP_LDC_TX_SCR_TX_QPA], %g5
sub %g4, %g5, %g4 ! New TX head offset
ldx [%g3 + SP_LDC_TX_SCR_TXSIZE], %g5
move %xcc, 0, %g4 ! check for wrap around
stw %g4, [%g3 + SP_LDC_TX_SCR_TXHEAD]
ldx [%g3 + SP_LDC_TX_SCR_RX_QPA], %g5
sub %g1, %g5, %g1 ! New RX tail offset
ldx [%g3 + SP_LDC_TX_SCR_RXSIZE], %g5
move %xcc, 0, %g1 ! check for wrap around
stw %g1, [%g3 + SP_LDC_TX_SCR_RXTAIL]
ldx [%g3 + SP_LDC_TX_SCR_TARGET], %g2
! Write new TX head and RX tail values and see whether we
! need to send the SP notification
! We only send notification if the RX queue was empty. The
! algorithm we use to avoid missed interrupts is as
! Read orig RX head (orig_rx_hd)
! Read orig RX tail (orig_rx_tl)
! Write new RX tail value
! Read (possibly) new RX head (new_rx_hd)
! if ((orig_rx_hd==orig_rx_tl)||(new_rx_hd==orig_rx_tl)) {
lduw [ %g3 + SP_LDC_TX_SCR_TXHEAD ], %g6
stw %g6, [ %g2 + LDC_TX_QHEAD ] ! commit new TX head
ldx [ %g3 + SP_LDC_TX_SCR_RX_QDPA ], %g4 ! queue data PA
ldx [ %g3 + SP_LDC_TX_SCR_RX_QPA ], %g4 ! queue base PA
ldub [ %g4 + SRAM_LDC_HEAD ], %g5
LDC_SRAM_IDX_TO_OFFSET(%g5)
ldub [%g4 + SRAM_LDC_TAIL], %g1
LDC_SRAM_IDX_TO_OFFSET(%g1)
!! %g4 RX queue descriptor
lduw [%g3 + SP_LDC_TX_SCR_RXTAIL], %g6
LDC_SRAM_OFFSET_TO_IDX(%g6)
stb %g6, [%g4 + SRAM_LDC_TAIL] ! commit new RX tail
ldub [%g4 + SRAM_LDC_HEAD], %g4
LDC_SRAM_IDX_TO_OFFSET(%g4)
cmp %g1, %g5 ! (orig_rx_tl == orig_rx_hd) ?
cmp %g1, %g4 ! (orig_rx_tl == new_rx_hd) ?
cmp %g1, %g6 ! if old rx tail == new rx tail...
move %xcc, %g0, %g2 ! ...do not send intr (no data sent)
!! %g2 send interrupt flag
SET_SIZE(sram_ldc_push_data)
! sp_to_guest_pull_data: stub entry point.
! NOTE(review): the body is empty in this view; per the comments below it
! must never be reached when the hypervisor is built without FPGA support.
ENTRY_NP(sp_to_guest_pull_data)
! Should never be invoked if this hypervisor is compiled
! without the FPGA support.
SET_SIZE(sp_to_guest_pull_data)
! guest_to_sp_tx_set_tail: stub entry point.
! NOTE(review): the body is empty in this view; per the comments below it
! must never be reached when the hypervisor is built without FPGA support.
ENTRY_NP(guest_to_sp_tx_set_tail)
! Should never be invoked if this hypervisor is compiled
! without the FPGA support.
SET_SIZE(guest_to_sp_tx_set_tail)
* Binds the identified table with the given LDC
* int ldc_set_map_table(uint64_t channel, uint64_t table_ra,
* uint64_t table_entries);
* %o1 table_ra (0 disables mapping for given channel)
* EINVAL - illegal map table ra
* ECHANNEL - illegal channel
! hcall_ldc_set_map_table: bind (or unbind) a guest-supplied export map
! table to the given LDC channel (see API comment above: %o0=channel,
! %o1=table RA, %o2=#entries; 0 entries unconfigures).
! NOTE(review): this excerpt appears to be missing some compare/delay-slot
! and label lines; comments below describe only the visible instructions.
ENTRY_NP(hcall_ldc_set_map_table)
bnz,pn %xcc, herr_badalign ! base addr not aligned ?
! Bounds-check the channel number against the maximum channel index.
set GUEST_LDC_MAX_CHANNEL_IDX, %g2
bgeu,pn %xcc, herr_invalchan
mulx %o0, LDC_ENDPOINT_SIZE, %g2 ! byte offset of this channel's endpoint
set GUEST_LDC_ENDPOINT, %g3
! The endpoint must be live and must not be private.
ldub [%g2 + LDC_IS_LIVE], %g3
brz,pn %g3, herr_invalchan
ldub [%g2 + LDC_IS_PRIVATE], %g3
brnz,pn %g3, herr_invalchan
brz,pn %o2, 1f ! size of 0 unconfigures table
! Validate the requested table size against min/max limits.
cmp %o2, LDC_MIN_MAP_TABLE_ENTRIES ! Table smaller than min size
set LDC_MAX_MAP_TABLE_ENTRIES, %g4 ! Table size bigger than
cmp %o2, %g4 ! largest index we can store?
bge,pn %xcc, herr_inval ! invalid size
sub %o2, 1, %g4 ! Table size is not a ^2
sllx %o2, LDC_MTE_SHIFT, %g4 ! convert #entries to bytes
! Translate the caller's table RA to a PA; fails on an illegal range.
RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, herr_noraddr, %g6, %g5)
! Configure path: record RA/PA/entry-count; size stored last so a
! nonzero size implies the other fields are already valid.
stx %o1, [%g2 + LDC_MAP_TABLE_RA]
stx %g5, [%g2 + LDC_MAP_TABLE_PA]
stx %o2, [%g2 + LDC_MAP_TABLE_NENTRIES]
stx %g4, [%g2 + LDC_MAP_TABLE_SZ] ! set last - tbl configured
! Unconfigure path: clear size first so readers never observe a nonzero
! size paired with stale RA/PA fields.
stx %g0, [%g2 + LDC_MAP_TABLE_SZ] ! set first - tbl unconfigured
stx %g0, [%g2 + LDC_MAP_TABLE_NENTRIES]
stx %g0, [%g2 + LDC_MAP_TABLE_RA]
stx %g0, [%g2 + LDC_MAP_TABLE_PA]
SET_SIZE(hcall_ldc_set_map_table)
* Returns the map table info for the given channel number
* int ldc_get_map_table(uint64_t channel);
* ECHANNEL - illegal channel
! hcall_ldc_get_map_table: return the map table RA and entry count
! currently bound to the given channel.
! In: %o0 = channel. Out (visible): %o1 = table RA, %o2 = #entries.
! NOTE(review): compare feeding the first branch is not visible in this
! excerpt; comments cover only the visible instructions.
ENTRY_NP(hcall_ldc_get_map_table)
set GUEST_LDC_MAX_CHANNEL_IDX, %g2
bgeu,pn %xcc, herr_invalchan ! channel out of range
mulx %o0, LDC_ENDPOINT_SIZE, %g2 ! byte offset of this channel's endpoint
set GUEST_LDC_ENDPOINT, %g3
! The endpoint must be live and must not be private.
ldub [%g2 + LDC_IS_LIVE], %g3
brz,pn %g3, herr_invalchan
ldub [%g2 + LDC_IS_PRIVATE], %g3
brnz,pn %g3, herr_invalchan
ldx [%g2 + LDC_MAP_TABLE_NENTRIES], %o2
ldx [%g2 + LDC_MAP_TABLE_RA], %o1
SET_SIZE(hcall_ldc_get_map_table)
* Copy in/out the data from the given cookie_addr
* for length bytes (multiple of 8) to/from the
* flags=0 for copyin (remote cookie buffer to local real),
* flags=1 for copyout (local real to remote cookie buffer)
* For EOK actual length copied is returned.
* int ldc_copy(ldc_channel_t chan,
* On EOK length copied is in %o1
* ECHANNEL - illegal channel
* ENOMAP - illegal / invalid cookie addr
* ENORADDR - illegal raddr to raddr+length
* EBADALIGN - badly aligned raddr or cookie_addr or length
* EINVAL - illegal flags etc., no map table assigned
* EBADPGSZ - page size does not match
* FIXME: Items to clean up with this block are:
* 1. Careful access to a MTE mapping with a quadd load
* 2. Better bcopy loop / legion version ..
* 2b. Possibly restrict alignment to either block or page
* size to enable allocating block stores.
* 3. Enable tracking of in progress copies so
* mapping tables can be demapped or allocated to other
* 4. Reference count for channels using these tables.
! Enforce 8 byte alignment early
! Copy direction is either 0 or 1 : in or out
! ch_id, scr, guest, endpoint
GET_LDC_ENDPOINT(%o0, %g3, %g1, %g7)
! Check the RA range we've been given
! hstruct, raddr, size, fail_label, scr
RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o3, %o4, herr_noraddr, %g4, %g3)
! Check endpoint connection type
ldub [%g7 + LDC_TARGET_TYPE], %g3 ! if type=0, target is guest
brz,pt %g3, .guest_copy ! else copying directly
! Copy data from HV memory to guest RA
!! %o2 PA of address in HV
! no need for table lookup
! FIXME: we need a way to verify if the PA is
! Must check against the currently valid ranges of phys mem
! Limit length to the end of a page
and %o2, %g5, %g5 ! offset into remote page
sub %g4, %g5, %g4 ! distance to end of page
cmp %g4, %o4 ! distance <= length
movl %xcc, %g4, %o4 ! clamp to end of page
! Find the corresponding endpoint at the recipient ..
ldx [%g7 + LDC_TARGET_GUEST], %g2
ldx [%g7 + LDC_TARGET_CHANNEL], %g5
mulx %g5, LDC_ENDPOINT_SIZE, %g5
set GUEST_LDC_ENDPOINT, %g6
add %g4, %g6, %g4 ! g4 is the target endpoint
ldub [%g4 + LDC_IS_LIVE], %g3
brz,pn %g3, herr_invalchan
ldub [%g4 + LDC_IS_PRIVATE], %g3
brnz,pn %g3, herr_invalchan
! Find our map table PA from endpoint
ldx [%g4 + LDC_MAP_TABLE_PA], %g3
srlx %o2, 60, %g6 ! extract page size
brnz,pn %g6, herr_badpgsz ! only 8K for now
sllx %o2, 8, %g6 ! Extract cookie idx
srlx %g6, 13+8, %g6 ! Bits: 56-pg_size_bits
ldx [%g4 + LDC_MAP_TABLE_NENTRIES], %g5 ! table entries
bge,pn %xcc, herr_nomap ! off end of table ?
sllx %g6, LDC_MTE_SHIFT, %g6 ! Size of MTE
ldx [%g3 + %g6], %g3 ! MTE itself
srlx %g3, LDC_MTE_PERM_CPRD_BIT, %g5
bz,pn %xcc, herr_noaccess ! error for invalid MTE? FIXME
sllx %g5, 13, %g5 ! Currently assume 8K pages
andn %g6, %g5, %g6 ! Create a Rpfn mask
and %g3, %g6, %g3 ! Extract target real pfn
and %o2, %g5, %g5 ! Extract page offset
or %g3, %g5, %g3 ! Target RA
! Limit the copy to the map page size
add %g5, %o4, %g4 ! Length of copy + offset
movl %xcc, %g6, %o4 ! Limit copy to end of page
! Check that we are in range
! get the PA for the exported page
RA2PA_RANGE_CONV_UNK_SIZE(%g2, %g3, %o4, herr_noraddr, %g6, %o2)
! Copy the data from one page to another
!! %o2 phys addr of remote buffer
!! %o3 checked RA of local buffer
RA2PA_CONV(%g1, %o3, %g5, %g6)
!! %g5 PA of local buffer
! Clamp if closer to end of local page than remote buffer
sub %g2, %g3, %g3 ! Number of bytes to end of page
cmp %g3, %o4 ! if < copy len, clamp copy len
! See if it is a LDC_COPY_IN
! FIXME: Use an optimized block allocating copy !
* Allocate hypervisor map table entry for given cookie
* so that we can track usage model.
* Returns a RA that identifies the tracking slot.
* int hv_ldc_mapin(int channel, uint64_t cookie)
* ECHANNEL - illegal channel
* ENOMAP - illegal / invalid cookie addr
* ENORADDR - illegal raddr to raddr+length
* EBADALIGN - badly aligned raddr or cookie_addr or length
* EINVAL - illegal flags etc., no map table assigned
* EBADPGSZ - page size does not match
! hcall_ldc_mapin: validate an export cookie against the remote
! endpoint's map table, atomically allocate a mapin tracking entry,
! fill it in, and return an RA (with perms) identifying the mapping.
! In: %o0 = channel, %o1 = cookie (see API comment above).
! Out (visible): %o2 = permissions; %o1 carries the encoded mapin RA.
! NOTE(review): this excerpt appears to omit several compare/branch and
! delay-slot lines; comments describe only the visible instructions.
ENTRY_NP(hcall_ldc_mapin)
! Stash the args into the cpu struct scratch area
! so we can retrieve later (7 g regs arent enough)
! FIXME: could use cpu push/pop
stx %o0, [%g1 + CPU_SCR0]
stx %o1, [%g1 + CPU_SCR1]
GET_LDC_ENDPOINT( %o0, %g7, %g1, %g2 ) !! %g1=guest, %g2=endpoint
! FIXME: Workaround to disable mapin support if
! HV MD does not have RA range in guest MDs
! to be deleted later ...
set GUEST_LDC_MAPIN_BASERA, %g3
brz,pn %g3, herr_notsupported
! Mapin is only supported for guest-to-guest endpoints.
ldub [%g2 + LDC_TARGET_TYPE], %g3 ! if type=0, target is guest
brnz,pt %g3, herr_inval ! we're ok - fail HV
! Find the target endpoint ...
ldx [%g2 + LDC_TARGET_GUEST], %g3
ldx [%g2 + LDC_TARGET_CHANNEL], %g4
mulx %g4, LDC_ENDPOINT_SIZE, %g4
set GUEST_LDC_ENDPOINT, %g5
add %g5, %g4, %g4 ! g4 is the target endpoint
! The target endpoint must be live and must not be private.
ldub [%g4 + LDC_IS_LIVE], %g5
brz,pn %g5, herr_invalchan
ldub [%g4 + LDC_IS_PRIVATE], %g5
brnz,pn %g5, herr_invalchan
! Find table index after page size
srlx %o1, LDC_COOKIE_PGSZC_SHIFT, %g2 ! extract pg size
! If its not a valid page size assume cookie is bogus
set TTE_VALIDSIZEARRAY, %g5
sllx %o1, 64-LDC_COOKIE_PGSZC_SHIFT, %g6 ! shift off pg size
add %g5, 13 + (64-LDC_COOKIE_PGSZC_SHIFT), %g5 ! shift for index
srlx %g6, %g5, %g5 ! get index
! Check index to see if it is in range
ldx [%g4 + LDC_MAP_TABLE_NENTRIES], %g6
! Find remote map table PA from endpoint
ldx [%g4 + LDC_MAP_TABLE_PA], %g6
sllx %g5, LDC_MTE_SHIFT, %g7 ! byte offset of the MTE in the table
! Check we have permission for something ... (ie valid)
! We ignore the copyin/copyout flags
srlx %g7, LDC_MTE_PERM_SHIFT, %o0
and %o0, LDC_MTE_PERM_MASK, %o0
andcc %o0, LDC_MAPIN_MASK, %g0
beq,pn %xcc, herr_noaccess
! NOTE: We already checked the cookie against the list of
! legit page sizes, so if this matches we dont need
! to check if MTE page size is legal.
srlx %g7, LDC_MTE_PGSZ_SHIFT, %o0
and %o0, LDC_MTE_PGSZ_MASK, %o0
bne,pn %xcc, herr_badpgsz
! sacrifice %g2 and %g4 in here...
! Check if the entry has a legit RA range for the
sub %o1, 1, %o1 !! %o1 size of page
! Extract the RA & check for alignment
sllx %g7, LDC_MTE_RSVD_BITS, %g2
srlx %g2, 13+LDC_MTE_RSVD_BITS, %g2
bne,pn %xcc, herr_badalign ! page not aligned
! Is this a legit RA for the other guest
RA2PA_RANGE_CONV_UNK_SIZE(%g3, %o0, %o1, herr_noraddr, %g4, %g2)
! Finally everything checks out
! Let's allocate a mapin entry - fill in the details
! Picking a mapin entry has to be atomic in case we're in
! a race with another map-in
set GUEST_LDC_MAPIN_FREE_IDX, %o0
ldx [%o0], %o1 ! -1 == No more free available
mulx %o1, LDC_MAPIN_SIZE, %g2 ! Extract next idx from free
ldx [%g2 + LDC_MI_NEXT_IDX], %g4
casxa [%o0]ASI_P, %o1, %g4 ! atomically pop free-list head
! Fill in the mapin entry values
!! %o1 index of mapin entry
!! %g2 address of mapin entry
! Stash away remaining O regs for space.
ldx [%o0 + CPU_SCR0], %g4
sth %g4, [%g2 + LDC_MI_LOCAL_ENDPOINT]
stw %g5, [%g2 + LDC_MI_MAP_TABLE_IDX]
! g4 and g5 now available
srlx %g7, LDC_MTE_PERM_SHIFT, %g4
and %g4, LDC_MTE_PERM_MASK, %o2 ! Return perms
and %g4, LDC_MAPIN_MASK, %g4
stb %g4, [%g2 + LDC_MI_PERMS]
sllx %g7, LDC_MTE_RSVD_BITS, %g4
srlx %g4, 13+LDC_MTE_RSVD_BITS, %g4
RA2PA_CONV(%g3, %g4, %g4, %g5)
! RA was already checked for alignment, so
! PA must also be aligned - no check required
stx %g4, [%g2 + LDC_MI_PA]
! Extract the page size again
srlx %g7, LDC_MTE_PGSZ_SHIFT, %g4
and %g4, LDC_MTE_PGSZ_MASK, %g4
stb %g4, [%g2 + LDC_MI_PG_SIZE]
! Use it to figure out the RA offset from the
! Clear cached VA / IO-VA / MMU-map state for the fresh entry.
stx %g0, [%g2 + LDC_MI_VA]
sth %g0, [%g2 + LDC_MI_VA_CTX]
stx %g0, [%g2 + LDC_MI_IO_VA]
stx %g0, [%g2 + LDC_MI_MMU_MAP]
#if (LARGEST_PG_SIZE_BITS+LDC_NUM_MAPINS_BITS) > 55
#error Sanity check failed: too many mapin entries to encode in RA
! Encode the mapin index into the returned RA above the largest page size.
sllx %o1, LARGEST_PG_SIZE_BITS, %o1
set GUEST_LDC_MAPIN_BASERA, %g4
! scrap g1 and replace with cpu struct
ldx [%g1 + CPU_SCR1], %g1
SET_SIZE(hcall_ldc_mapin)
* Simple support function to release and clear a mapin entry
* that is no longer in use
* %g2 index of mapin entry
set GUEST_LDC_MAPIN_FREE_IDX, %g3
add %g1, %g3, %g3 ! Address of free idx
mulx %g2, LDC_MAPIN_SIZE, %g1
add %g1, %g4, %g1 ! Address of mapin entry
! Perms are used to determine liveness
stb %g0, [%g1 + LDC_MI_PERMS]
stx %g4, [%g1 + LDC_MI_NEXT_IDX] ! do first so link-in is atomic
casxa [%g3]ASI_P, %g4, %g5
* callback for console input
* %g1 callback arg (guest struct)
! cons_ldc_callback: LDC receive callback for console input.
! In: %g1 = callback arg (guest struct, per comment above);
!     %g2 presumably points at the received packet (its first byte is
!     the packet type) -- TODO confirm against caller.
! Data packets are appended to the guest's console input ring; control
! packets set BREAK/HUP status bits; anything else is dropped.
! NOTE(review): this excerpt appears to omit some labels/branches
! (e.g. .console_data, 2:); comments cover only the visible code.
ENTRY_NP(cons_ldc_callback)
! get the console struct for this endpt
ldub [%g1 + CONS_STATUS], %g4 ! chk if console is ready
andcc %g4, LDC_CONS_READY, %g0
ldub [%g2], %g6 ! get the packet type
cmp %g6, LDC_CONSOLE_DATA
beq,pt %xcc, .console_data
cmp %g6, LDC_CONSOLE_CONTROL ! check if control pkt
bne,pt %xcc, 1f ! else drop pkt and return
! Control path: record a BREAK in the console status byte.
ldub [%g1 + CONS_STATUS], %g4 ! get console status
or %g4, LDC_CONS_BREAK, %g4 ! set the break bit
lduw [%g2 + LDC_CONS_CTRL_MSG], %g6 ! get control message
cmp %g6, %g5 ! chk it is a break
stb %g4, [%g1 + CONS_STATUS]
! Control path: record a HUP (hangup) in the console status byte.
ldub [%g1 + CONS_STATUS], %g4 ! get console status
or %g4, LDC_CONS_HUP, %g4 ! set the hup bit
lduw [%g2 + LDC_CONS_CTRL_MSG], %g6 ! get control message
cmp %g6, %g5 ! chk it is a hangup
stb %g4, [%g1 + CONS_STATUS]
ba 1f ! invalid control message
! Data path: copy payload bytes into the CONS_INBUF ring buffer,
! wrapping the tail at CONS_INBUF_SIZE (power of two).
ldx [%g1 + CONS_INTAIL], %g4 ! get current tail
ldub [%g2 + LDC_CONS_SIZE], %g3 ! get num chars
add %g2, LDC_CONS_PAYLOAD, %g2 ! start from second word
add %g1, CONS_INBUF, %g5 ! incoming buffer
add %g5, %g4, %g5 ! dest buf offset loc
and %g4, (CONS_INBUF_SIZE - 1), %g4 ! and wrap
bnz,pn %xcc, 2b ! if not zero, copy next byte
stx %g4, [%g1 + CONS_INTAIL] ! store the new tail
SET_SIZE(cons_ldc_callback)
!! %g2 real address to be mapped
!! %g4 offset in mapin RA region
! XXX - Need to check that bits between LARGEST_PG_SIZE_BITS and
! actual page size are zero to prevent aliasing.
! ldc_dmmu_mapin_ra: DTLB miss path for a real address inside the LDC
! mapin region -- look up the mapin entry, check permissions, insert a
! real-translation TTE into the DTLB, then record per-strand MMU usage
! so the mapping can be tracked for later demap.
! In (per comments above): %g2 = real address to be mapped,
! %g4 = offset in the mapin RA region; %g1 presumably holds a strand /
! cpu-related struct (STRAND_ID is read from it) -- TODO confirm.
! NOTE(review): some compare/delay-slot lines are not visible here.
ENTRY_NP(ldc_dmmu_mapin_ra)
.global rdmmu_miss_not_found2
GET_MAPIN_ENTRY(%g1, %g4, %g5)
! Reject entries granting neither read nor write permission.
ldub [%g5 + LDC_MI_PERMS], %g6
andcc %g6, LDC_MAP_R|LDC_MAP_W, %g0
beq,pn %xcc, rdmmu_miss_not_found2
! OK have a mapable RA with some permissions
! stuff the DTLB with the right info.
! FIXME: cant support write only with N1s TLB
ldub [%g5 + LDC_MI_PG_SIZE], %g4
ldx [%g5 + LDC_MI_PA], %g3
andcc %g6, LDC_MAP_W, %g0
or %g6, TTE_CP|TTE_P, %g6 ! TTE_CP wont fit cmov
sllx %g6, 63, %g6 ! valid bit
! TAG register is still configured for us
mov TLB_IN_REAL|TLB_IN_4V_FORMAT, %g2
stxa %g3, [%g2]ASI_DTLB_DATA_IN ! insert the TTE into the DTLB
! Now the expensive bit - track the MMU usage
ldub [%g1 + STRAND_ID], %g1 /* FIXME: use asr26? */
add %g1, MIE_RA_MMU_SHIFT, %g1
add %g5, LDC_MI_MMU_MAP, %g5 !!!
ATOMIC_OR_64(%g5, %g1, %g2, %g3) ! set this strand's usage bit
SET_SIZE(ldc_dmmu_mapin_ra)
! FIXME: need equivalent for immu of dmmu_mapin_ra
!! %g5 offset into mapin region
! FIXME: need to cross leverage with ldc_dmmu_mapin_ra
! FIXME: Need to check that bits between LARGEST_PG_SIZE_BITS and
! actual page size are zero to prevent aliasing.
GET_MAPIN_ENTRY(%g6, %g5, %g2)
ldub [%g2 + LDC_MI_PERMS], %g6
andcc %g6, LDC_MAP_R|LDC_MAP_W, %g0
! OK have a mapable RA with some permissions
! stuff the DTLB with the right info.
! FIXME: cant support write only with N1s TLB
! Fail page size mis-match otherwise our demap doesnt work
and %g7, TTE_SZ_MASK, %g4
ldub [%g2 + LDC_MI_PG_SIZE], %g5
ldx [%g2 + LDC_MI_PA], %g5
andcc %g6, LDC_MAP_W, %g0
andn %g5, %g6, %g5 ! clear w bit if no write permission
CLEAR_TTE_LOCK_BIT(%g5, %g6) ! %g5 tte (force clear lock bit)
! TAG register is still configured for us
mov TLB_IN_4V_FORMAT, %g6
stxa %g5, [%g6]ASI_DTLB_DATA_IN
! Pull the fault address and context, save it
and %g3, %g4, %g5 ! context
sth %g5, [%g2 + LDC_MI_VA_CTX]
andn %g3, %g4, %g3 ! vaddr
stx %g3, [%g2 + LDC_MI_VA]
! Now the expensive bit - track the MMU usage
VCPU2STRAND_STRUCT(%g1, %g1)
ldub [%g1 + STRAND_ID], %g1
add %g1, MIE_VA_MMU_SHIFT, %g1
add %g2, LDC_MI_MMU_MAP, %g2
ATOMIC_OR_64(%g2, %g1, %g5, %g3)
mov MMU_FT_INVALIDRA, %g1
! Pull the fault address and context again
and %g3, %g2, %g5 ! context
andn %g3, %g2, %g3 ! addr
!! %g3 TTE without PA/RA field
!! %g5 offset into mapin region
! FIXME: need to cross leverage with ldc_dmmu_mapin_ra
! FIXME: Need to check that bits between LARGEST_PG_SIZE_BITS and
! actual page size are zero to prevent aliasing.
! ldc_map_addr_api: mmu_map_addr back-end for addresses in the LDC
! mapin region -- check the mapin entry's permissions against the
! requested mapping type, build the TTE, insert it into the I- or
! D-TLB, record the VA/context, and track per-strand MMU usage.
! In (visible): %o0 = vaddr, %o1 = ctx, %o3 = flags (MAP_ITLB tested),
! %g3 = TTE without PA/RA field, %g5 = offset into mapin region
! (per comments above).
! NOTE(review): this excerpt appears to omit several compare/branch
! and delay-slot lines; comments cover only the visible instructions.
ENTRY_NP(ldc_map_addr_api)
.global hcall_mmu_map_addr_ra_not_found
GET_MAPIN_ENTRY(%g6, %g5, %g2)
! If we ask for an I mapping, make sure we have MAP_X
! If we ask for a D mapping, make sure we have at least MAP_R
! .. we require MAP_W if the TTE has WPERM and we ask for D mapping
movne %xcc, LDC_MAP_R, %g4
movne %xcc, LDC_MAP_X, %g7
movne %xcc, LDC_MAP_W, %g7
ldub [%g2 + LDC_MI_PERMS], %g4
ldub [%g2 + LDC_MI_VA_MMU_MAP], %g7
ldx [%g2 + LDC_MI_VA], %g7
lduh [%g2 + LDC_MI_VA_CTX], %g7
! Strip the TTE write bit when the entry lacks write permission.
andcc %g4, LDC_MAP_W, %g0
move %xcc, TTE_W, %g4 ! if !=0 move correct value in
andn %g3, %g4, %g3 ! clear w bit if no write permission
! OK have a mapable RA with some permissions
! stuff the DTLB with the right info.
! FIXME: cant support write only with N1s TLB
! Fail page size mis-match otherwise our demap doesnt work
and %g3, TTE_SZ_MASK, %g4
ldub [%g2 + LDC_MI_PG_SIZE], %g7
bne,pn %xcc, hcall_mmu_map_addr_ra_not_found
ldx [%g2 + LDC_MI_PA], %g7
or %g3, %g7, %g3 ! start building TTE
CLEAR_TTE_LOCK_BIT(%g3, %g7) ! %g3 tte (force clear lock bit)
or %o0, %o1, %g4 !! %g4 tag
mov TLB_IN_4V_FORMAT, %g6
btst MAP_ITLB, %o3 ! Test in delay slot to setup xcc
! Insert into D- or I-TLB depending on the MAP_ITLB flag.
stxa %g3, [%g6]ASI_DTLB_DATA_IN
stxa %g3, [%g6]ASI_ITLB_DATA_IN
! Remember the VA/context this entry is currently mapped at.
stx %o0, [%g2 + LDC_MI_VA]
sth %o1, [%g2 + LDC_MI_VA_CTX]
! Now the expensive bit - track the MMU usage
VCPU2STRAND_STRUCT(%g1, %g1)
ldub [%g1 + STRAND_ID], %g1
add %g1, MIE_VA_MMU_SHIFT, %g1
add %g2, LDC_MI_MMU_MAP, %g2
ATOMIC_OR_64(%g2, %g1, %g5, %g3) ! set this strand's usage bit
SET_SIZE(ldc_map_addr_api)
* Unmaps the page mapped at RA from the local guest.
* FIXME: We assume for the moment the guest has done the right
* demap clean up - so all we have to do here is free up the
* internal structure associated with the map table entry.
* This is currently a big security hole, but not a functional
* gap for the moment, since the only guest we have (Solaris) is
* well behaved. So we have to fix this eventually.
* FIXME: Check that unused raddr bits are zero in case of aliasing
! hcall_ldc_unmap: release the mapin entry backing a mapped-in RA.
! Validates that the RA lies within the guest's mapin region, derives
! the mapin index from the RA, and loads the entry's perms (per the
! comment below, perms determine liveness). See FIXME above: currently
! trusts the guest to have demapped its own translations first.
! NOTE(review): the compares feeding the first two branches are not
! visible in this excerpt.
ENTRY_NP(hcall_ldc_unmap)
set GUEST_LDC_MAPIN_BASERA, %g4
bneg,pn %xcc, herr_noraddr
set GUEST_LDC_MAPIN_SIZE, %g4
brgez,pn %g4, herr_noraddr
srlx %g5, LARGEST_PG_SIZE_BITS, %g2 ! mapin idx
mulx %g2, LDC_MAPIN_SIZE, %g5
add %g1, %g5, %g5 ! addr of mapin entry
ldub [%g5 + LDC_MI_PERMS], %g6 ! perms are used to determine liveness
SET_SIZE(hcall_ldc_unmap)
* FIXME: Currently not used, so fault in when we have a guest that
* arg2 revoke_cookie (%o2)
! hcall_ldc_revoke: revoke a previously exported cookie.
! Empty stub for now (see FIXME above: unused until a guest needs it).
ENTRY_NP(hcall_ldc_revoke)
SET_SIZE(hcall_ldc_revoke)
! ldc_vintr_getcookie: return the cookie bound to a virtual LDC
! interrupt.
! In (visible): %o1 = interrupt index into the i2e table.
! Out: %o1 = cookie from the mapreg.
! Fails to get_cookie_fail on an out-of-range index or unbound mapreg.
! NOTE(review): the compare feeding the first branch is not visible in
! this excerpt.
ENTRY_NP(ldc_vintr_getcookie)
bgeu,pn %xcc, get_cookie_fail
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g1
! BUGFIX: branched to get_target_fail, which is the gettarget entry
! point's error path; every sibling accessor fails to its own *_fail
! label, and this function already uses get_cookie_fail above.
brz,pn %g1, get_cookie_fail
! load the cookie from the target endpoint structure mapreg
ldx [%g1 + LDC_MAPREG_COOKIE], %o1
SET_SIZE(ldc_vintr_getcookie)
! ldc_vintr_setcookie: store a cookie for a virtual LDC interrupt.
! In (visible): %o1 = interrupt index, %o2 = cookie.
! Fails to set_cookie_fail on a bad index or unbound mapreg.
! NOTE(review): compare feeding the first branch is not visible here.
ENTRY_NP(ldc_vintr_setcookie)
bgeu,pn %xcc, set_cookie_fail ! index out of range
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g1
brz,pn %g1, set_cookie_fail ! no mapreg bound -> fail
! store the cookie to the target endpoint structure mapreg
stx %o2, [%g1 + LDC_MAPREG_COOKIE]
SET_SIZE(ldc_vintr_setcookie)
* ret1 intr valid state (%o1)
! ldc_vintr_getvalid: return the interrupt-valid flag for a virtual LDC
! interrupt (see comment above: result in %o1).
! NOTE(review): compare feeding the first branch is not visible here.
ENTRY_NP(ldc_vintr_getvalid)
bgeu,pn %xcc, get_valid_fail ! index out of range
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g1
brz,pn %g1, get_valid_fail ! no mapreg bound -> fail
ldub [%g1 + LDC_MAPREG_VALID], %o1
SET_SIZE(ldc_vintr_getvalid)
* arg2 intr valid state (%o2) 1: Valid 0: Invalid
! ldc_vintr_setvalid: set the interrupt-valid flag for a virtual LDC
! interrupt (see comment above: %o2 = 1 valid / 0 invalid). When a RX
! interrupt is enabled and its state is IDLE, check for pending packets
! and notify the guest (per comments below).
! NOTE(review): several branch/delay-slot lines are not visible here.
ENTRY_NP(ldc_vintr_setvalid)
bgeu,pn %xcc, set_valid_fail ! index out of range
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g2
brz,pn %g2, set_valid_fail ! no mapreg bound -> fail
ldx [%g1 + LDC_I2E_ENDPOINT], %g1
! for valid RX interrupts only, if state is IDLE check if we need
brz,pn %o2, 1f ! interrupt VALID?
stb %o2, [%g2 + LDC_MAPREG_VALID] ! regardless, fill in status
add %g1, LDC_RX_MAPREG, %g3
ld [%g2 + LDC_MAPREG_STATE], %g3 ! only bother if interrupt
cmp %g3, INTR_IDLE ! IDLE
! check if there are pending pkts, if any, notify guest
SET_SIZE(ldc_vintr_setvalid)
! ldc_vintr_gettarget: return the virtual cpu id currently targeted by
! a virtual LDC interrupt.
! In (visible): %o1 = interrupt index. Out: %o1 = target vcpu id.
! NOTE(review): compare feeding the first branch is not visible here.
ENTRY_NP(ldc_vintr_gettarget)
bgeu,pn %xcc, get_target_fail ! index out of range
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g1
brz,pn %g1, get_target_fail ! no mapreg bound -> fail
! load the cpup from the target endpoint structure mapreg
ldx [%g1 + LDC_MAPREG_CPUP], %g1
ldub [%g1 + CPU_VID], %o1 ! cpu struct -> virtual cpu id
SET_SIZE(ldc_vintr_gettarget)
! ldc_vintr_settarget: retarget a virtual LDC interrupt at a new vcpu.
! In (visible): %o1 = interrupt index, %o2 = vcpu id (converted to a
! physical cpu pointer before being stored in the mapreg).
! NOTE(review): compare feeding the first branch is not visible here.
ENTRY_NP(ldc_vintr_settarget)
bgeu,pn %xcc, set_target_fail ! index out of range
! convert vcpuid to pcpup
VCPUID2CPUP(%g1, %o2, %o2, herr_nocpu, %g2)
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g1
brz,pn %g1, set_target_fail ! no mapreg bound -> fail
! store cpup to target endpoint structure mapreg
stx %o2, [%g1 + LDC_MAPREG_CPUP]
SET_SIZE(ldc_vintr_settarget)
* ret1 (%o1) 0: idle 1: received 2: delivered
! ldc_vintr_getstate: return the delivery state of a virtual LDC
! interrupt (see comment above: 0 idle, 1 received, 2 delivered).
! NOTE(review): compare feeding the first branch is not visible here.
ENTRY_NP(ldc_vintr_getstate)
bgeu,pn %xcc, get_state_fail ! index out of range
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g1
brz,pn %g1, get_state_fail ! no mapreg bound -> fail
ld [%g1 + LDC_MAPREG_STATE], %o1
SET_SIZE(ldc_vintr_getstate)
* arg2 (%o2) 0: idle 1: received 2: delivered
! ldc_vintr_setstate: set the delivery state of a virtual LDC interrupt
! (see comment above: %o2 = 0 idle, 1 received, 2 delivered). When a RX
! interrupt goes IDLE, check for pending packets and notify the guest
! (per comments below).
! NOTE(review): several compare/branch/delay-slot lines are not visible
! in this excerpt.
ENTRY_NP(ldc_vintr_setstate)
bgeu,pn %xcc, set_state_fail ! index out of range
brlz,pn %o2, set_state_fail ! state must not be negative
bgu,pn %xcc, set_state_fail ! ... nor above the legal range
mulx %o1, LDC_I2E_SIZE, %g2 ! offset into i2e table
ldx [%g1 + LDC_I2E_MAPREG], %g2
brz,pn %g2, set_state_fail ! no mapreg bound -> fail
ldx [%g1 + LDC_I2E_ENDPOINT], %g1
! for valid RX interrupts only, if state is IDLE check if we need
cmp %o2, INTR_IDLE ! interrupt IDLE?
st %o2, [%g2 + LDC_MAPREG_STATE] ! regardless, fill in state
add %g1, LDC_RX_MAPREG, %g3
ldub [%g2 + LDC_MAPREG_VALID], %g3 ! only bother if interrupt
! check if there are pending pkts, if any, notify guest
SET_SIZE(ldc_vintr_setstate)
* Wrapper around hv_ldc_send_pkt, so it can be called from C
* SPARC ABI requires only that g2,g3,g4 are preserved across
* void c_hvldc_send(int hv_endpt, void *payload)
STRAND_PUSH(%g2, %g6, %g7)
STRAND_PUSH(%g3, %g6, %g7)
STRAND_PUSH(%g4, %g6, %g7)
* Internal function to send a 64-byte LDC pkt to a guest.
* hv_ldc_send_pkt(channel, paddr)
* %g1 0 if success, else error value
* (g7 is return address from caller)
! hv_ldc_send_pkt: append one 64-byte LDC packet to the RX queue of the
! guest at the far end of an HV LDC channel, then notify the target cpu
! if the queue was previously empty (see API comment above: %g1 holds
! channel on entry and 0/error on return; %g7 is the return address).
! NOTE(review): this excerpt appears to omit many compare/branch and
! delay-slot lines; comments below cover only the visible instructions.
ENTRY_NP(hv_ldc_send_pkt)
ldx [%g4 + CONFIG_HV_LDCS], %g4 ! base of HV LDC endpoint array
mulx %g1, LDC_ENDPOINT_SIZE, %g5
! The HV endpoint must be live.
ldub [%g4 + LDC_IS_LIVE], %g5
brz,a,pn %g5, .ldc_send_ret
* find target guest and LDC target idx
ldx [%g4 + LDC_TARGET_GUEST], %g3
ldx [%g4 + LDC_TARGET_CHANNEL], %g4
setx GUEST_LDC_ENDPOINT, %g5, %g1
mulx %g4, LDC_ENDPOINT_SIZE, %g5
! An unconfigured (zero-size) RX queue means nowhere to deliver.
ldx [%g5 + LDC_RX_QSIZE], %g1
brz,a,pn %g1, .ldc_send_ret
* check if the target q is full
* if queue is full, wait until pkts
* get read from the queue
ldx [%g5 + LDC_RX_QSIZE], %g1
lduw [%g5 + LDC_RX_QTAIL], %g6
lduw [%g5 + LDC_RX_QHEAD], %g6
be,a,pn %xcc, .ldc_send_ret
* %g2 payload PA (modified)
* %g3 target cpu ptr (modified)
* %g4 target channel idx (modified)
* %g5 endpoint ptr (modified)
* %g1 0=success, else error value
* append data to the tail of an LDC RX queue and
* send cross call notification if necessary
* NOTE: prior to calling this macro, you must already have
* verified that there is indeed room available in the RX queue
* since this macro does not check for that.
lduw [%g5 + LDC_RX_QTAIL], %g6
ldx [%g5 + LDC_RX_QBASE_PA], %g1
lduw [%g5 + LDC_RX_QTAIL], %g2
ldx [%g5 + LDC_RX_QSIZE], %g1
lduw [%g5 + LDC_RX_QHEAD], %g6
stw %g1, [%g5 + LDC_RX_QTAIL] ! publish the advanced tail
bne,pn %xcc, .ldc_send_ret_ok ! if queue was non-empty, then we
nop ! don't need to send notification.
! Queue transitioned empty -> non-empty: notify the target cpu.
STRAND_PUSH(%g7, %g2, %g1) ! preserve return address across call
HVCALL(hv_ldc_cpu_notify)
SET_SIZE(hv_ldc_send_pkt)
ldx [%g3 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g1
ldub [%g3 + LDC_RX_MAPREG + LDC_MAPREG_VALID], %g5
brz,pn %g5, 2f /* interrupt VALID? */
ld [%g3 + LDC_RX_MAPREG + LDC_MAPREG_STATE], %g5
cmp %g5, INTR_IDLE /* interrupt IDLE? */
add %g3, LDC_RX_MAPREG + LDC_MAPREG_STATE, %g4
casa [%g4]ASI_P, %g6, %g5
ldx [%g3 + LDC_RX_MAPREG + LDC_MAPREG_COOKIE], %g5
SET_SIZE(hv_ldc_cpu_notify)
* Wrapper around hv_ldc_cpu_notify so it can be called from C
* SPARC ABI requires only that g2,g3,g4 are preserved across
* void c_ldc_cpu_notify(ldc_endpoint_t *t_endpt)
STRAND_PUSH(%g2, %g6, %g7)
STRAND_PUSH(%g3, %g6, %g7)
STRAND_PUSH(%g4, %g6, %g7)
HVCALL(hv_ldc_cpu_notify)
SET_SIZE(c_ldc_cpu_notify)
#if CONFIG_FPGA /* { Support for LDC over the FPGA mailbox */
* Wrapper around LDC_SEND_SP_INTR so it can be called from C
* SPARC ABI requires only that %g2,%g3,%g4 are preserved across
* function calls and we don't use any of these registers here.
* %o1 = reason for interrupt
* void c_ldc_send_sp_intr(struct *ldc_endpoint target, endpoint, int reason)
! c_ldc_send_sp_intr: C-callable wrapper around LDC_SEND_SP_INTR.
! Per the header comments above, the C signature passes a target
! endpoint and an interrupt reason (%o1) -- arguments presumably arrive
! in the %o registers per the SPARC C ABI; the %g registers here are
! macro scratch. TODO confirm against the LDC_SEND_SP_INTR definition.
ENTRY(c_ldc_send_sp_intr)
LDC_SEND_SP_INTR(%g5, %g1, %g7, %g6)
SET_SIZE(c_ldc_send_sp_intr)