Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / common / src / ldc.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: ldc.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do not alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)ldc.s 1.14 07/07/17 SMI"
50
51/*
52 * LDC support functions
53 */
54
55#include "config.h"
56
57#include <sys/asm_linkage.h>
58#include <sys/htypes.h>
59#include <hypervisor.h>
60#include <sparcv9/asi.h>
61#include <sun4v/asi.h>
62#include <asi.h>
63#include <hprivregs.h>
64#include <sun4v/mmu.h>
65#include <sun4v/intr.h>
66#include <mmu.h>
67#ifdef CONFIG_FPGA
68#include <fpga.h>
69#endif
70#include <md.h>
71#include <debug.h>
72#include <offsets.h>
73#include <util.h>
74#include <ldc.h>
75#include <hvctl.h>
76#include <abort.h>
77
78/*
79 * Common block of code executed by all LDC package API
80 * calls to verify input value and fetch the pointer to
81 * the LDC endpoint.
82 *
83 * Input registers:
84 * ch_id (unmodified) - channel ID of LDC
85 * scr (modified) - scratch register
86 *
87 * Output registers:
88 * guest (modified) - returns current guest pointer
89 * endpoint (modified) - returns the ldc_endpoint pointer
90 *
 * Exits via herr_invalchan (does not return to the caller) if the
 * channel ID is out of range, the endpoint is not live, or the
 * endpoint is marked private.
91 */
92#define GET_LDC_ENDPOINT(ch_id, scr, guest, endpoint) \
93 GUEST_STRUCT(guest) ;\
94 set GUEST_LDC_MAX_CHANNEL_IDX, endpoint ;\
95 ldx [guest + endpoint], endpoint ;\
96 cmp ch_id, endpoint ;\
97 bgeu,pn %xcc, herr_invalchan /* is channel legit? */ ;\
98 nop ;\
99 mulx ch_id, LDC_ENDPOINT_SIZE, endpoint ;\
100 set GUEST_LDC_ENDPOINT, scr ;\
101 add endpoint, scr, endpoint ;\
102 add endpoint, guest, endpoint ;\
103 ldub [endpoint + LDC_IS_LIVE], scr ;\
104 brz,pn scr, herr_invalchan /* is channel live? */ ;\
105 ldub [endpoint + LDC_IS_PRIVATE], scr ;\
106 brnz,pn scr, herr_invalchan /* is channel private? */ ;\
107 nop
108
109/*
110 * Macro to calculate channel status for the target in a
111 * guest<->guest LDC link.
112 *
113 * Parameters:
114 * guest_endpt (unmodified) - guest endpoint struct pointer
115 * status (return value) - Output: status of channel
116 * scr (modified) - scratch register
117 */
118#define GET_GUEST_QUEUE_STATUS(guest_endpt, status, scr) \
119 .pushlocals ;\
120 ldx [guest_endpt + LDC_TARGET_GUEST], status ;\
121 ldx [guest_endpt + LDC_TARGET_CHANNEL], scr ;\
122 mulx scr, LDC_ENDPOINT_SIZE, scr ;\
123 add status, scr, status ;\
124 set GUEST_LDC_ENDPOINT, scr ;\
125 add scr, status, status /* target endpoint */ ;\
126 ;\
127 ldub [status + LDC_RX_UPDATED], scr ;\
128 brnz,a scr, 1f /* if flag is set... */ ;\
129 stb %g0, [status + LDC_RX_UPDATED] /* then clear it */ ;\
1301: ;\
131 ldx [status + LDC_RX_QSIZE], status /* if qsize==0 then */ ;\
132 brz,a status, 0f /* status is DOWN */ ;\
133 mov LDC_CHANNEL_DOWN, status ;\
134 brz,a scr, 0f /* if qsize!=0 && updated=0 */ ;\
135 mov LDC_CHANNEL_UP, status /* then UP */ ;\
136 mov LDC_CHANNEL_RESET, status /* else RESET */ ;\
1370: ;\
138 .poplocals
139
140/*
141 * Macro to calculate channel status for the target in a
142 * guest<->SP LDC link.
143 *
144 * Parameters:
145 * guest_endpt (unmodified) - guest endpoint struct pointer
146 * status (return value) - Output: status of channel
147 * scr (modified) - scratch register
148 */
149#define GET_SP_QUEUE_STATUS(guest_endpt, status, scr) \
150 .pushlocals ;\
151 ROOT_STRUCT(status) ;\
152 ldx [status + CONFIG_SP_LDCS], status ;\
153 ldx [guest_endpt + LDC_TARGET_CHANNEL], scr ;\
154 mulx scr, SP_LDC_ENDPOINT_SIZE, scr ;\
155 add status, scr, status /* target endpoint */ ;\
156 ldx [status + SP_LDC_TX_QD_PA], status /* QD ptr to SRAM */ ;\
157 ;\
158 ldub [status + SRAM_LDC_STATE_UPDATED], scr ;\
159 brnz,a scr, 1f /* if flag set, then */ ;\
160 stb %g0, [status + SRAM_LDC_STATE_UPDATED] /* clear it */ ;\
1611: ;\
162 ldub [status + SRAM_LDC_STATE], status /* status */ ;\
163 brz,a status, 0f /* DOWN = 0, UP = 1 */ ;\
164 mov LDC_CHANNEL_DOWN, status ;\
165 brz,a scr, 0f /* if status=UP && updated=0 */ ;\
166 mov LDC_CHANNEL_UP, status /* then UP */ ;\
167 mov LDC_CHANNEL_RESET, status /* else RESET */ ;\
1680: ;\
169 .poplocals
170
171/*
172 * hv_ldc_chk_pkts - Check channel for pending pkts
173 *
174 * Check if the specified endpoint has any pkts available
175 * in either its Rx queue or peer's Tx queue. If pending,
176 * deliver a mondo to the CPU associated with this endpt.
177 * Used by ldc_vintr_setstate and ldc_vintr_setvalid
178 * to notify guest when interrupts are enabled.
179 *
180 * Parameters:
181 * %g1 endpoint being checked (modified)
182 * %g2 - %g6 scratch (modified)
183 */
184 ENTRY_NP(hv_ldc_chk_pkts)
185
186 lduw [%g1 + LDC_RX_QHEAD], %g2
187 lduw [%g1 + LDC_RX_QTAIL], %g3
188 cmp %g2, %g3 ! if Rx queue not empty (head!=tail)
189 bne %xcc, .notify_guest ! then notify; else check transmit side
190 nop
191
192 ldub [%g1 + LDC_TARGET_TYPE], %g2
193 cmp %g2, LDC_GUEST_ENDPOINT
194 be,pt %xcc, .peer_is_guest
195 cmp %g2, LDC_SP_ENDPOINT
196 bne,pt %xcc, .no_notification
197 nop
198
199 ! Target is a SP endpoint
200 ! Read and compare the SRAM head and tail
201 ROOT_STRUCT(%g2)
202 ldx [%g2 + CONFIG_SP_LDCS], %g2 ! get SP endpoint array
203 ldx [%g1 + LDC_TARGET_CHANNEL], %g3 ! and target endpoint
204 mulx %g3, SP_LDC_ENDPOINT_SIZE, %g3
205 add %g2, %g3, %g2 ! and its struct
206
207 !! %g2 sp endpoint
208 ! quick check to see whether there are any packets
209 ! to grab on this channel.
210 ldx [%g2 + SP_LDC_RX_QD_PA], %g2
211 ldub [%g2 + SRAM_LDC_HEAD], %g3
212 ldub [%g2 + SRAM_LDC_TAIL], %g4
213 cmp %g3, %g4
214 bne,pn %xcc, .notify_guest
215 nop
216
217 HVRET
218 /*NOTREACHED*/
219
220.peer_is_guest:
221 ! Target is a guest endpoint
222 ldx [%g1 + LDC_TARGET_GUEST], %g2 ! find target guest
223 set GUEST_LDC_ENDPOINT, %g3
224 add %g2, %g3, %g2
225 ldx [%g1 + LDC_TARGET_CHANNEL], %g3 ! and its endpoint
226 mulx %g3, LDC_ENDPOINT_SIZE, %g3
227 add %g2, %g3, %g2 ! target endpt struct
228
229 lduw [%g2 + LDC_TX_QHEAD], %g3 ! check if src has
230 lduw [%g2 + LDC_TX_QTAIL], %g4 ! anything pending for
231 cmp %g3, %g4 ! transmit
232 beq,pt %xcc, .no_notification
233 nop
234
235.notify_guest:
236 !! %g1 endpoint to deliver interrupt
 ! Atomically transition the mapped-reg interrupt state from
 ! IDLE to DELIVERED; if it was not IDLE, an interrupt is
 ! already outstanding and we must not send another.
237 add %g1, LDC_RX_MAPREG + LDC_MAPREG_STATE, %g4
238 set INTR_DELIVERED, %g5
239 set INTR_IDLE, %g6
240 casa [%g4]ASI_P, %g6, %g5
241 cmp %g5, INTR_IDLE
242 bne,a,pn %xcc, .no_notification
243 nop
244
245 ldx [%g1 + LDC_RX_MAPREG + LDC_MAPREG_COOKIE], %g3
246 ldx [%g1 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g1
247 brz,pn %g1, .no_notification
248 nop
249 !! %g1 target cpup
250 !! %g2 flag (1 = mondo)
251 !! %g3 data (cookie)
252 !! %g2 - %g6 trashed
253 ba send_dev_mondo ! tail call, returns to caller
254 mov 1, %g2
255
256.no_notification:
257 HVRET
258 SET_SIZE(hv_ldc_chk_pkts)
259
260
261/*
262 * ldc_tx_qconf
263 *
264 * arg0 channel (%o0)
265 * arg1 q base raddr (%o1) - must be aligned to size of queue
266 * arg2 size (#entries) (%o2) - must be power of 2 (or 0 to unconfigure queue)
267 * --
268 * ret0 status (%o0)
269 *
270 * Configure transmit queue for LDC endpoint.
271 * -
272 */
273 ENTRY_NP(hcall_ldc_tx_qconf)
274
275 ! verifies channel ID, returns pointers to guest and ldc_endpoint
276 !! %g1 guest
277 !! %g2 endpoint
278 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
279
280 brz,pn %o2, 2f ! size of 0 unconfigures queue
281 nop
282
283 ! number of entries must be a power of 2
284 sub %o2, 1, %g3
285 and %o2, %g3, %g3
286 brnz,pn %g3, herr_inval
287 nop
288
289 sllx %o2, Q_EL_SIZE_SHIFT, %g4 ! convert #entries to bytes
290
291 ! queue raddr must be aligned to size of queue
292 sub %g4, 1, %g5
293 btst %g5, %o1
294 bnz,pn %xcc, herr_badalign ! base addr not aligned ?
295 nop
296
297 RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, herr_noraddr, %g6, %g5)
298
299 ! Note: The guest can flush a TX queue by (re)configuring it.
300 ! If this happens, we still want to make sure that the
301 ! head/tail pointer consistency is maintained between the two
302 ! guests and so we mark the queue empty without moving the
303 ! tail pointer. Note that the tail pointer is set to zero at
304 ! start of day.
305 !
306 ! We do, however, need to make sure the new tail value is not
307 ! larger than the size of the queue in case the guest is switching
308 ! to a smaller queue.
309
310 stx %o1, [%g2 + LDC_TX_QBASE_RA]
311 stx %g5, [%g2 + LDC_TX_QBASE_PA]
312 lduw [%g2 + LDC_TX_QTAIL], %g6 ! read existing tail
313 sub %g4, Q_EL_SIZE, %g5
314 sub %g5, %g6, %g5 ! bigger than qsize?
315 movrlz %g5, %g0, %g6 ! if so, we have to zero
316 brlz,a %g5, 1f ! the head and tail pointer
317 stw %g6, [%g2 + LDC_TX_QTAIL]
3181:
319 stw %g6, [%g2 + LDC_TX_QHEAD]
320
321 ! set queue size last if queue is being configured
322 stx %g4, [%g2 + LDC_TX_QSIZE]
323
324 HCALL_RET(EOK)
325
3262:
327 ! All we need to do is set the qsize to zero if the queue is
328 ! being unconfigured.
329 !
330 ! Note: we specifically do NOT clear the LDC_TX_QBASE_PA field
331 ! because doing so could introduce a security hole.
332 stx %g0, [%g2 + LDC_TX_QSIZE]
333
334 HCALL_RET(EOK)
335 SET_SIZE(hcall_ldc_tx_qconf)
336
337/*
338 * ldc_tx_qinfo
339 *
340 * arg0 channel (%o0)
341 * --
342 * ret0 status (%o0)
343 * ret1 q base raddr (%o1)
344 * ret2 size (#entries) (%o2)
345 *
346 * Return information about the LDC endpoint's transmit queue.
347 */
348 ENTRY_NP(hcall_ldc_tx_qinfo)
349
350 ! verifies channel ID, returns pointers to guest and ldc_endpoint
351 !! %g1 guest
352 !! %g2 endpoint
353 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
354
355 ldx [%g2 + LDC_TX_QBASE_RA], %o1
356 ldx [%g2 + LDC_TX_QSIZE], %g4
357 srlx %g4, Q_EL_SIZE_SHIFT, %o2 ! convert bytes back to #entries
358 HCALL_RET(EOK)
359 SET_SIZE(hcall_ldc_tx_qinfo)
360
361/*
362 * ldc_tx_get_state
363 *
364 * arg0 channel (%o0)
365 * --
366 * ret0 status (%o0)
367 * ret1 head offset (%o1)
368 * ret2 tail offset (%o2)
369 * ret3 channel state (%o3)
370 *
371 * Return information about the current state of the queue.
372 */
373 ENTRY_NP(hcall_ldc_tx_get_state)
374
375 ! verifies channel ID, returns pointers to guest and ldc_endpoint
376 !! %g1 guest
377 !! %g2 endpoint
378 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
379
380 ldx [%g2 + LDC_TX_QSIZE], %g3 ! no Q configured ?
381 brz,pn %g3, herr_inval
382 nop
383
384 lduw [%g2 + LDC_TX_QHEAD], %o1
385 lduw [%g2 + LDC_TX_QTAIL], %o2
386
387 ldub [%g2 + LDC_TARGET_TYPE], %g1 ! is this endpoint connected to
388 cmp %g1, LDC_HV_ENDPOINT ! a hypervisor endpoint?
389 be,a %xcc, 3f ! if so, assume channel is up
390 mov LDC_CHANNEL_UP, %o3
391
392 cmp %g1, LDC_GUEST_ENDPOINT ! is this endpoint connected to
393 be %xcc, 2f ! another guest endpoint?
394 nop
395
396 ! must be a guest<->sp connection
397 GET_SP_QUEUE_STATUS(%g2, %o3, %g1)
398 ba,a 3f
3992:
400 GET_GUEST_QUEUE_STATUS(%g2, %o3, %g1)
4013:
402 HCALL_RET(EOK)
403 SET_SIZE(hcall_ldc_tx_get_state)
404
405/*
406 * guest_to_guest_tx_set_tail
407 *
408 * %g1 - new tail value
409 * %g2 - sender's endpoint
410 *
411 * Increments the guest TX tail pointer and sends notification to the RX
412 * guest if necessary.
413 *
414 * Note: It is important that the caller has already verified that the
415 * new tail value is valid given the current state of the queue.
416 *
417 */
418 ENTRY_NP(guest_to_guest_tx_set_tail)
419
420 !! %g1 new tail value
421 !! %g2 sender's endpoint
422 stw %g1, [%g2 + LDC_TX_QTAIL]
423
424 ldx [%g2 + LDC_TARGET_GUEST], %g5 ! find target guest
425 ldx [%g2 + LDC_TARGET_CHANNEL], %g3 ! and its endpoint
426 mulx %g3, LDC_ENDPOINT_SIZE, %g4
427 add %g5, %g4, %g4
428 set GUEST_LDC_ENDPOINT, %g6
429 add %g6, %g4, %g4
430 ldx [%g4 + LDC_RX_QSIZE], %g6 ! no Q configured ?
431 brz,pn %g6, .tx_set_tail_done ! no notification
432 nop
433
434 ! Just leave the data in our transmit queue for now. The recipient
435 ! will be responsible for pulling over the data into its receive
436 ! queue when the guest on that end makes a call to check its
437 ! receive queue head/tail pointers.
438
439 lduw [%g4 + LDC_RX_QHEAD], %g6
440 lduw [%g4 + LDC_RX_QTAIL], %g3
441 cmp %g6, %g3 ! only send an interrupt
442 bne %xcc, .tx_set_tail_done ! if the RX queue is empty.
443 nop
444
445 ! now see if we need to send an interrupt to the recipient
446 ldx [%g4 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g5
447 brnz,pn %g5, 1f
448 nop
449
450 ! if no target CPU specified, is there a vdev interrupt we
451 ! need to generate?
452 ldx [%g4 + LDC_RX_VINTR_COOKIE], %g5
453 brz,pn %g5, .tx_set_tail_done ! if not, we are done.
454 nop
455
456 mov %g5, %g1
457 STRAND_PUSH(%g7, %g5, %g6)
458 HVCALL(vdev_intr_generate)
459 STRAND_POP(%g7, %g5)
460
461 ba,a .tx_set_tail_done
4621:
463 !! %g4 recipient's endpoint
464 mov %g4, %g3
465
466 STRAND_PUSH(%g7, %g5, %g6)
467 HVCALL(hv_ldc_cpu_notify)
468 STRAND_POP(%g7, %g5)
469
470.tx_set_tail_done:
471
472 HVRET
473 SET_SIZE(guest_to_guest_tx_set_tail)
474
475
476/*
477 * ldc_tx_set_qtail
478 *
479 * arg0 channel (%o0)
480 * arg1 tail offset (%o1)
481 * --
482 * ret0 status (%o0)
483 *
484 * Used by the guest to send data packets down the channel.
485 */
486 ENTRY_NP(hcall_ldc_tx_set_qtail)
487
488 ! verifies channel ID, returns pointers to guest and ldc_endpoint
489 !! %g1 guest
490 !! %g2 endpoint
491 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
492
493 ! new tail offset must be aligned properly
494 andcc %o1, Q_EL_SIZE-1, %g0
495 bnz,pn %xcc, herr_badalign
496 nop
497
498 ! Transmit queue configured?
499 ldx [%g2 + LDC_TX_QSIZE], %g3
500 brz,pn %g3, herr_inval
501 nop
502
503 ! new tail offset must be within range
504 cmp %g3, %o1
505 bleu,pn %xcc, herr_inval ! offset bigger than Q or less than 0?
506 nop
507
508 ! verify new tail value makes sense with respect to the old head/tail
509 lduw [%g2 + LDC_TX_QHEAD], %g3
510 lduw [%g2 + LDC_TX_QTAIL], %g4
511 ldx [%g2 + LDC_TX_QBASE_PA], %g7 ! save this off for now
512 cmp %g4, %g3
513 bl %xcc, 1f
514 nop
515
516 ! tail >= head i.e queue data not yet wrapped or queue empty
517 ! verify ((new_tail > tail) || (new_tail < head))
518 cmp %o1, %g4
519 bg %xcc, 2f
520 cmp %o1, %g3
521 bl %xcc, 2f
522 nop
523 ba herr_inval ! invalid tail value
524 nop
5251:
526 ! tail < head i.e. queue data currently wraps around end of queue
527 ! verify ((new_tail > tail) && (new_tail < head))
528 cmp %o1, %g4
529 ble,pn %xcc, herr_inval
530 cmp %o1, %g3
531 bge,pn %xcc, herr_inval
532 nop
533
5342: ! input values verified
535
536 !! %g1 guest
537 !! %g2 endpoint
538 !! %g3 tx qhead
539 !! %g7 tx qbase PA
540
541 ! Check to see if the target is a
542 ! Guest domain or endpoint in HV or SP
543
544 ldub [%g2 + LDC_TARGET_TYPE], %g4
545 cmp %g4, LDC_GUEST_ENDPOINT
546 be,pt %xcc, .guest_target ! guest <-> guest connection
547 cmp %g4, LDC_HV_ENDPOINT
548 be,pt %xcc, .hv_target ! guest <-> hypervisor connection
549 nop
550
551 /*
552 * guest <-> SP connection
553 */
554 mov %o1, %g1
555
556 !! %g1 new tail value
557 !! %g2 sender's endpoint
558
559 HVCALL(guest_to_sp_tx_set_tail) ! clobbers all %g1,%g3-%g7
560
561 ba .ldc_tx_set_qtail_done
562 nop
563
564.hv_target:
565
566 !
567 ! guest <-> hypervisor connection
568 !
569
570 ! update tail pointer and invoke callback to process data
571
572 stw %o1, [%g2 + LDC_TX_QTAIL]
573
574.ldc_tx_set_qtail_hv:
575
576 !! %g1 guest
577 !! %g2 endpoint
578 !! %g3 tx qhead
579 !! %g7 tx qbase PA
580
581 ROOT_STRUCT(%g4)
582 ldx [%g4 + CONFIG_HV_LDCS], %g4 ! get HV endpoint array
583 ldx [%g2 + LDC_TARGET_CHANNEL], %g1 ! and target endpoint
584 mulx %g1, LDC_ENDPOINT_SIZE, %g5
585 add %g4, %g5, %g5 ! and its struct
586
587 ldx [%g5 + LDC_RX_CB], %g6 ! get the callback
588 brz,pn %g6, .ldc_tx_set_qtail_done ! if none, drop pkt
589 nop
590 ldx [%g5 + LDC_RX_CBARG], %g1 ! load the argument
591
592 add %g3, %g7, %g7 ! PA of the payload
593
594 ldx [%g2 + LDC_TX_QSIZE], %g5 ! each time we invoke the
595 dec Q_EL_SIZE, %g5 ! callback, it will consume one
596 add %g3, Q_EL_SIZE, %g3 ! element from the Q so we
597 and %g3, %g5, %g5 ! update the head pointer by
598 stw %g5, [%g2 + LDC_TX_QHEAD] ! one and store the new value.
599 mov %g7, %g2
600
601 !! %g1 call back arg
602 !! %g2 payload PA
603 !! %g6 callback
604 !! %g7 return addr
605
606 jmp %g6 ! invoke callback
607 rd %pc, %g7
608
609 ! Guest may have incremented the tail pointer by more than one
610 ! element and so now we must check to see whether the queue is
611 ! empty. If not, we will have to invoke the callback again.
612
613 GUEST_STRUCT(%g1)
614 mulx %o0, LDC_ENDPOINT_SIZE, %g2
615 set GUEST_LDC_ENDPOINT, %g3
616 add %g2, %g3, %g2
617 add %g2, %g1, %g2
618
619 !! %g1 guest
620 !! %g2 endpoint
621
622 lduw [%g2 + LDC_TX_QHEAD], %g3
623 lduw [%g2 + LDC_TX_QTAIL], %g4
624 ldx [%g2 + LDC_TX_QBASE_PA], %g7 ! save this off for now
625 cmp %g3, %g4 ! Is Q empty now?
626 bne,pn %xcc, .ldc_tx_set_qtail_hv
627 nop
628
629 ! If Q is empty, we are done.
630 ba .ldc_tx_set_qtail_done
631 nop
632
633.guest_target:
634
635 !
636 ! guest <-> guest connection
637 !
638
639 mov %o1, %g1
640
641 !! %g1 new tail value
642 !! %g2 sender's endpoint
643
644 HVCALL(guest_to_guest_tx_set_tail) ! clobbers all %g1,%g3-%g7
645
646 !! %g2 sender's endpoint
647
648.ldc_tx_set_qtail_done:
649
650 HCALL_RET(EOK)
651 SET_SIZE(hcall_ldc_tx_set_qtail)
652
653/*
654 * ldc_rx_qconf
655 *
656 * arg0 channel (%o0)
657 * arg1 q base raddr (%o1) - must be aligned to size of queue
658 * arg2 size (#entries) (%o2) - must be power of 2 (or 0 to unconfigure queue)
659 * --
660 * ret0 status (%o0)
661 *
662 * Configure receive queue for LDC endpoint.
663 */
664 ENTRY_NP(hcall_ldc_rx_qconf)
665
666 ! verifies channel ID, returns pointers to guest and ldc_endpoint
667 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2) ! %g1 guest, %g2 endpoint
668
669 brz,pn %o2, 2f ! size of 0 unconfigures queue
670 nop
671
672 ! number of entries must be a power of 2
673 sub %o2, 1, %g3
674 and %o2, %g3, %g3
675 brnz,pn %g3, herr_inval
676 nop
677
678 sllx %o2, Q_EL_SIZE_SHIFT, %g4 ! convert #entries to bytes
679
680 ! queue raddr must be aligned to size of queue
681 sub %g4, 1, %g5
682 btst %g5, %o1
683 bnz,pn %xcc, herr_badalign ! base addr not aligned ?
684 nop
685
686 RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, herr_noraddr, %g6, %g5)
687
688 ! Note: The guest can flush a RX queue by (re)configuring it.
689 ! If this happens, we still want to make sure that the
690 ! head/tail pointer consistency is maintained between the two
691 ! guests and so we mark the queue empty without moving the
692 ! tail pointer. Note that the tail pointer is set to zero at
693 ! start of day.
694 !
695 ! We do, however, need to make sure the new tail value is not
696 ! larger than the size of the queue in case the guest is switching
697 ! to a smaller queue.
698
699 stx %o1, [%g2 + LDC_RX_QBASE_RA]
700 stx %g5, [%g2 + LDC_RX_QBASE_PA]
701 lduw [%g2 + LDC_RX_QTAIL], %g6 ! read existing tail
702 sub %g4, Q_EL_SIZE, %g5
703 sub %g5, %g6, %g5 ! bigger than qsize?
704 movrlz %g5, %g0, %g6 ! if so, we have to zero
705 brlz,a %g5, 1f ! the head and tail pointer
706 stw %g6, [%g2 + LDC_RX_QTAIL]
7071:
708 stw %g6, [%g2 + LDC_RX_QHEAD]
7092:
710
711 ldub [%g2 + LDC_TARGET_TYPE], %g3
712 cmp %g3, LDC_SP_ENDPOINT
713
714#ifdef CONFIG_FPGA
715 ! args:
716 !! %o0 - arg0 channel
717 !! %o1 - arg1 q base raddr
718 !! %o2 - arg2 size (#entries)
719 !! %g2 - endpoint
720 !! %g4 - new RX_QSIZE value if arg2 != 0
721 be,pn %xcc, sp_ldc_update_link_status ! returns directly to guest
722 nop
723#else
724 be,pn %xcc, herr_inval ! shouldn't be using SRAM LDC
725 nop
726#endif
727
728 brz,pn %o2, 3f ! new qsize of 0 unconfigures queue
729 nop
730
731 mov 1, %g5
732 ldx [%g2 + LDC_RX_QSIZE], %g6 ! read existing size
733 brnz,a %g6, 2f
734 stb %g5, [%g2 + LDC_RX_UPDATED] ! if size not zero, set updated
7352:
736 stx %g4, [%g2 + LDC_RX_QSIZE] ! set last if Q being configured
737
738 ba 4f ! notify
739 nop
7403:
741 ! All we need to do is set the qsize to zero if the queue is
742 ! being un-configured.
743 !
744 ! Note: we specifically do NOT clear the LDC_RX_QBASE_PA field
745 ! because doing so could introduce a security hole.
746
747 ldx [%g2 + LDC_RX_QSIZE], %g6 ! read existing size
748 stx %g0, [%g2 + LDC_RX_QSIZE]
749 brz,pn %g6, 5f ! if existing size=0, return
750 nop
751
752 mov 1, %g5
753 stb %g5, [%g2 + LDC_RX_UPDATED] ! else set updated, & notify
7544:
755 ldub [%g2 + LDC_TARGET_TYPE], %g3
756 cmp %g3, LDC_GUEST_ENDPOINT
757 bne,pt %xcc, 5f
758 nop
759
760 ldx [%g2 + LDC_TARGET_GUEST], %g4
761 ldx [%g2 + LDC_TARGET_CHANNEL], %g3
762 mulx %g3, LDC_ENDPOINT_SIZE, %g3
763 add %g3, %g4, %g3
764 set GUEST_LDC_ENDPOINT, %g6
765 add %g6, %g3, %g3 ! Target endpt struct
766
767 ldx [%g3 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g4
768 brz,pn %g4, 5f
769 nop
770
771 !! %g2 - this endpoint struct
772 !! %g3 - target endpoint struct
773 !! %g4 - target CPU
774 !! %g5 - scratch
775 !! %g6 - scratch
776 !
777 ! Notify the other end that this endpoint's
778 ! Rx queue was reconfigured
779 !
780 HVCALL(hv_ldc_cpu_notify)
7815:
782 HCALL_RET(EOK)
783 SET_SIZE(hcall_ldc_rx_qconf)
784
785/*
786 * ldc_rx_qinfo
787 *
788 * arg0 channel (%o0)
789 * --
790 * ret0 status (%o0)
791 * ret1 q base raddr (%o1)
792 * ret2 size (#entries) (%o2)
793 *
794 * Return information about the LDC endpoint's receive queue.
795 */
796 ENTRY_NP(hcall_ldc_rx_qinfo)
797
798 ! verifies channel ID, returns pointers to guest and ldc_endpoint
799 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2) ! %g1 guest, %g2 endpoint
800
801 ldx [%g2 + LDC_RX_QBASE_RA], %o1
802 ldx [%g2 + LDC_RX_QSIZE], %g4
803 srlx %g4, Q_EL_SIZE_SHIFT, %o2 ! convert bytes back to #entries
804 HCALL_RET(EOK)
805 SET_SIZE(hcall_ldc_rx_qinfo)
806
807/*
808 * guest_to_guest_pull_data
809 *
810 * Input:
811 * %g2 - receiver's endpoint (preserved)
812 *
813 * Clobbers:
814 * %g1, %g3-7
815 *
816 * Pulls queue data (if available) from the target endpoint's TX queue
817 * into this specified endpoint's RX queue.
818 *
819 */
820 ENTRY_NP(guest_to_guest_pull_data)
821
822 !! %g2 our endpoint
823
824 ! We will need to clobber some additional registers so save them
825 STRAND_PUSH(%g7, %g3, %g4)
826 STRAND_PUSH(%o1, %g3, %g4)
827 STRAND_PUSH(%o2, %g3, %g4)
828 STRAND_PUSH(%o3, %g3, %g4)
829 STRAND_PUSH(%g2, %g3, %g4)
830
831 ldx [%g2 + LDC_TARGET_GUEST], %g1 ! find sender's guest
832 ldx [%g2 + LDC_TARGET_CHANNEL], %g3 ! and its endpoint
833 mulx %g3, LDC_ENDPOINT_SIZE, %g4
834 add %g1, %g4, %g1
835 set GUEST_LDC_ENDPOINT, %g6
836 add %g6, %g1, %g1
837 ldx [%g1 + LDC_TX_QSIZE], %g6 ! no TX Q configured ? Then
838 brz,pn %g6, .done_copying_data ! there is no data to pull over
839 nop
840
841 ! limit each call to copying a certain number of packets so as to
842 ! not keep the CPU stuck in the hypervisor for too long.
843 set (LDC_MAX_PKT_COPY * Q_EL_SIZE), %g7
844
845.copy_more_data:
846
847 ! make sure we are not trying to send more packets than
848 ! allowed per hcall.
849 brlez %g7, .done_copying_data
850 nop
851
852 !! %g1 sender's endpoint
853 !! %g2 our endpoint
854
855 lduw [%g1 + LDC_TX_QHEAD], %g3
856 lduw [%g1 + LDC_TX_QTAIL], %g4
857
858 sub %g4, %g3, %g4 ! check (tail - head) value.
859 brz %g4, .done_copying_data ! if zero, nothing to copy
860 nop ! since TX Q is empty.
861
862 brgz %g4, 1f ! If positive, then that's
863 nop ! how many bytes we need to
864 ! copy from the TX Q.
865
866 ldx [%g1 + LDC_TX_QSIZE], %g5 ! Else, we need to copy
867 sub %g5, %g3, %g4 ! (size - head) bytes from TX Q
868
8691:
870 !! %g1 sender's endpoint
871 !! %g2 our endpoint
872 !! %g3 sender's head pointer
873 !! %g4 bytes of data to copy
874
875 lduw [%g2 + LDC_RX_QHEAD], %o1
876 lduw [%g2 + LDC_RX_QTAIL], %g6
877
878 sub %o1, %g6, %g5
879 sub %g5, Q_EL_SIZE, %g5
880 brgez %g5, 1f ! If non-negative, then that's
881 nop ! how many bytes we are able
882 ! to copy into our RX Q.
883
884 ldx [%g2 + LDC_RX_QSIZE], %g5 ! our current RX Q size
885 sub %g5, %g6, %g5
886 brnz %o1, 1f ! but we can't fill our Q
887 nop ! completely so we must
888 sub %g5, Q_EL_SIZE, %g5 ! subtract if head is zero.
8891:
890 brz %g5, .done_copying_data ! if zero, nothing to copy
891 nop ! since our RX Q is full.
892
893 !! %g1 sender's endpoint
894 !! %g2 our endpoint
895 !! %g3 sender's head pointer
896 !! %g4 bytes of data to copy (sender)
897 !! %g5 bytes of data to copy (receiver)
898 !! %g6 our tail pointer
899
900 ! find the lesser of the two copy size values
901 sub %g4, %g5, %o1
902 movrgez %o1, %g5, %g4
903
904 ! make sure we don't copy more packets than allowed per hcall
905 sub %g7, %g4, %g5
906 brgz,a %g5, 1f ! if we haven't yet sent the max allowed pkts,
907 mov %g5, %g7 ! then simply update our counter and continue.
908
909 ! trying to copy more packets than (or exactly as many packets as)
910 ! allowed per hcall.
911 mov %g7, %g4 ! limit the number of bytes about to be copied
912 clr %g7 ! update our counter
9131:
914 mov %g3, %o2 ! save off the original tx head
915 mov %g6, %o3 ! and rx tail values.
916
917 ldx [%g1 + LDC_TX_QBASE_PA], %o1
918 add %g3, %o1, %g3
919 ldx [%g2 + LDC_RX_QBASE_PA], %o1
920 add %g6, %o1, %g6
921
922 sub %g4, 8, %o1 ! use as loop index
923
9241:
925 ldx [%g3], %g5 ! read data from TX Q head
926 stx %g5, [%g6] ! write data to RX Q tail
927 add %g3, 8, %g3 ! increment head pointer
928 sub %o1, 8, %o1
929 brgez,pt %o1, 1b ! loop until done.
930 add %g6, 8, %g6 ! increment tail pointer
931
932 ! Now we need to update our head and tail pointers
933 ldx [%g2 + LDC_RX_QSIZE], %g5
934 dec Q_EL_SIZE, %g5
935 add %o3, %g4, %o3
936 and %o3, %g5, %g5
937 stw %g5, [%g2 + LDC_RX_QTAIL]
938
939 ldx [%g1 + LDC_TX_QSIZE], %g5
940 dec Q_EL_SIZE, %g5
941 add %o2, %g4, %o2
942 and %o2, %g5, %g5
943 stw %g5, [%g1 + LDC_TX_QHEAD]
944
945 !! %g1 sender's endpoint
946 !! %g2 our endpoint
947 !! %g4 bytes of data that were copied
948
949 ba,a .copy_more_data
950
951.done_copying_data:
952
953 !! %g1 sender's endpoint
954 !! %g2 our endpoint
955
956 ! We might need to send a 'queue no longer full' interrupt
957 ! in certain situations.
958 ldub [%g1 + LDC_TXQ_FULL], %g3
959 brz,pt %g3, 1f
960 nop
961 stb %g0, [%g1 + LDC_TXQ_FULL]
962
963 ldx [%g1 + LDC_RX_VINTR_COOKIE], %g1
964 brz %g1, 1f
965 nop
966
967 HVCALL(vdev_intr_generate)
9681:
969 ! Restore registers we were not supposed to clobber.
970 STRAND_POP(%g2, %g3)
971 STRAND_POP(%o3, %g3)
972 STRAND_POP(%o2, %g3)
973 STRAND_POP(%o1, %g3)
974 STRAND_POP(%g7, %g3)
975
976 HVRET
977 SET_SIZE(guest_to_guest_pull_data)
978
979/*
980 * ldc_rx_get_state
981 *
982 * arg0 channel (%o0)
983 * --
984 * ret0 status (%o0)
985 * ret1 head offset (%o1)
986 * ret2 tail offset (%o2)
987 * ret3 channel state (%o3)
988 *
989 * Return information about the current state of the queue.
990 */
991 ENTRY_NP(hcall_ldc_rx_get_state)
992
993 ! verifies channel ID, returns pointers to guest and ldc_endpoint
994 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2) ! %g1 guest, %g2 endpoint
995
996 ldx [%g2 + LDC_RX_QSIZE], %g3 ! no Q configured ?
997 brz,pn %g3, herr_inval
998 nop
999
1000 ! At this point (if the other end is a guest or SP) we want to go
1001 ! and check the other transmit queue and see if there is any data
1002 ! to pull into our receive queue.
1003
1004 ldub [%g2 + LDC_TARGET_TYPE], %g3
1005 cmp %g3, LDC_HV_ENDPOINT
1006 be,a,pn %xcc, 3f
1007 mov LDC_CHANNEL_UP, %o3
1008 cmp %g3, LDC_SP_ENDPOINT
1009 be %xcc, 2f
1010 nop
1011
1012 !! %g2 guest endpoint (preserved)
1013 HVCALL(guest_to_guest_pull_data) ! clobbers all %g1,%g3-%g7
1014
1015 GET_GUEST_QUEUE_STATUS(%g2, %o3, %g1)
1016 ba 3f
1017 nop
10182:
1019 !! %g2 guest endpoint (preserved)
1020 HVCALL(sp_to_guest_pull_data) ! clobbers all %g1,%g3-%g7
1021
1022 GET_SP_QUEUE_STATUS(%g2, %o3, %g4)
1023
10243:
1025 lduw [%g2 + LDC_RX_QHEAD], %o1
1026 lduw [%g2 + LDC_RX_QTAIL], %o2
1027
1028 HCALL_RET(EOK)
1029 SET_SIZE(hcall_ldc_rx_get_state)
1030
1031
1032/*
1033 * ldc_rx_set_qhead
1034 *
1035 * arg0 channel (%o0)
1036 * arg1 head offset (%o1)
1037 * --
1038 * ret0 status (%o0)
1039 *
1040 * Used by the guest to indicate that it has received the packet(s).
1041 */
1042 ENTRY_NP(hcall_ldc_rx_set_qhead)
1043
1044 ! verifies channel ID, returns pointers to guest and ldc_endpoint
1045 !! %g1 guest
1046 !! %g2 endpoint
1047 GET_LDC_ENDPOINT(%o0, %g3, %g1, %g2)
1048
1049 ! new head offset must be aligned properly
1050 andcc %o1, Q_EL_SIZE-1, %g0
1051 bnz,pn %xcc, herr_badalign
1052 nop
1053
1054 ! Receive queue configured?
1055 ldx [%g2 + LDC_RX_QSIZE], %g3
1056 brz,pn %g3, herr_inval
1057 nop
1058
1059 ! new head offset must be within range
1060 cmp %g3, %o1
1061 bleu,pn %xcc, herr_inval ! offset bigger than Q or less than 0?
1062 nop
1063
1064 !! %g1 guest
1065 !! %g2 our endpoint
1066 !! %g3 our Q size
1067
1068 ! verify new head value makes sense with respect to the old head/tail
1069 lduw [%g2 + LDC_RX_QHEAD], %g3
1070 lduw [%g2 + LDC_RX_QTAIL], %g4
1071 cmp %g3, %g4
1072 bl %xcc, 1f
1073 nop
1074
1075 ! head >= tail i.e. queue data wraps around end of queue (or empty)
1076 ! verify ((new_head > head) || (new_head <= tail))
1077 cmp %o1, %g3
1078 bg %xcc, 2f
1079 cmp %o1, %g4
1080 ble %xcc, 2f
1081 nop
1082 ba herr_inval ! invalid head value
1083 nop
1084
10851: ! tail > head i.e. queue data not yet wrapped
1086 ! verify ((new_head > head) && (new_head <= tail))
1087 cmp %o1, %g3
1088 ble,pn %xcc, herr_inval
1089 cmp %o1, %g4
1090 bg,pn %xcc, herr_inval
1091 nop
1092
10932: ! input values verified
1094
1095 !! %g1 guest
1096 !! %g2 our endpoint
1097 !! %g3 initial head
1098
1099 stw %o1, [%g2 + LDC_RX_QHEAD]
1100
1101 HCALL_RET(EOK)
1102 SET_SIZE(hcall_ldc_rx_set_qhead)
1103
1104
1105#ifdef CONFIG_FPGA
1106
1107/*
1108 * sp_ldc_update_link_status
1109 *
1110 * arg0 channel (%o0)
1111 * arg1 q base raddr (%o1)
1112 * arg2 size (#entries) (%o2)
1113 * --
1114 * ret0 status (%o0)
1115 *
1116 * Additionally, we are passed the following arguments:
1117 *
1118 * %g2 endpoint
1119 * %g4 new RX_QSIZE value if arg2 != 0
1120 *
1121 * Called by the rx_qconf API routine for guest<->sp connections. The guest's
1122 * head/tail pointers have already been updated at this point. The only
 * things left to do here are:
1124 *
1125 * - update the rx_qsize field for this endpoint based on %o2/%g4
1126 * - update the SRAM link status fields and possibly send interrupt to SP
1127 * - return directly to guest
1128 *
1129 * N.B. It is important that we return directly to the guest from this
1130 * routine (using HCALL_RET or the like). %g7 does not contain a return
1131 * value to which we can branch.
1132 */
	ENTRY_NP(sp_ldc_update_link_status)

	!! %g2	guest endpoint
	!! %g4	new rx_qsize value

	! Locate the SP peer endpoint for this channel, then fetch the
	! PA of the shared queue descriptor kept in SRAM.
	ROOT_STRUCT(%g1)
	ldx	[%g1 + CONFIG_SP_LDCS], %g1
	ldx	[%g2 + LDC_TARGET_CHANNEL], %g3
	mulx	%g3, SP_LDC_ENDPOINT_SIZE, %g3
	add	%g1, %g3, %g1			! target endpoint
	ldx	[%g1 + SP_LDC_RX_QD_PA], %g1	! QD ptr to SRAM

	!! %g1	SRAM QD
	!! %g2	guest endpoint
	!! %g4	new rx_qsize value

	brz,pn	%o2, 2f		! was guest trying to un-configure the queue?
	nop

	! Queue is being (re)configured: mark link UP in the SRAM QD.
	! If the link was already UP (rx_qsize non-zero), also set the
	! "updated" flag so the SP notices the reconfiguration.
	mov	1, %g3
	ldx	[%g2 + LDC_RX_QSIZE], %g6	! reflects our link status
	brnz,a	%g6, 1f				! if status is not DOWN
	stb	%g3, [%g1 + SRAM_LDC_STATE_UPDATED] ! then set "updated" flag
1:
	stx	%g4, [%g2 + LDC_RX_QSIZE]	! Store the new qsize value
	stb	%g3, [%g1 + SRAM_LDC_STATE]	! link status = UP

	ba	3f		! send notification to SP
	nop
2:
	! All we need to do is set the qsize to zero if the queue is
	! being un-configured.
	!
	! Note: we specifically do NOT clear the LDC_RX_QBASE_PA field
	! because doing so could introduce a security hole.

	ldx	[%g2 + LDC_RX_QSIZE], %g3	! read existing size
	stb	%g0, [%g1 + SRAM_LDC_STATE]	! link status = DOWN
	stx	%g0, [%g2 + LDC_RX_QSIZE]
	brz,pn	%g3, 5f		! if existing size=0, return
	nop			! without sending interrupt

	mov	1, %g3		! set the "updated" flag
	stb	%g3, [%g1 + SRAM_LDC_STATE_UPDATED]

3:
	!! %g1	SRAM QD
	!! %g2	guest endpoint
	!! %g3	1

	stb	%g3, [%g1 + SRAM_LDC_STATE_NOTIFY]

	! Send notification interrupt to the SP
	! %g2	target endpoint (clobbered)
	LDC_SEND_SP_INTR(%g2, %g3, %g4, SP_LDC_STATE_CHG)
5:
	HCALL_RET(EOK)
	SET_SIZE(sp_ldc_update_link_status)
1191
1192
1193/*
1194 * guest_to_sp_tx_set_tail
1195 *
1196 * %g1 new tail value
1197 * %g2 sender's endpoint
1198 *
1199 * Increments the guest TX tail pointer and sends notification to the
1200 * SP if necessary.
1201 *
 * Note: It is important that the caller has already verified that the
1203 * new tail value is valid given the current state of the queue.
1204 *
1205 */
	ENTRY_NP(guest_to_sp_tx_set_tail)

	!! %g1	new tail value
	!! %g2	sender's endpoint

	! Locate the SP endpoint struct for this channel's target.
	ROOT_STRUCT(%g3)
	ldx	[%g3 + CONFIG_SP_LDCS], %g3	! get SP endpoint array
	ldx	[%g2 + LDC_TARGET_CHANNEL], %g4	! and target endpoint
	mulx	%g4, SP_LDC_ENDPOINT_SIZE, %g5
	add	%g3, %g5, %g3			! and its struct

	stw	%g1, [%g2 + LDC_TX_QTAIL]	! update the tail

	STRAND_PUSH(%g2, %g4, %g5)		! save off pointer

	!! %g2	guest endpoint
	!! %g3	sp endpoint

	! Push as much queued data as possible into the SRAM RX queue
	! while holding this endpoint's TX lock.
	add	%g3, SP_LDC_TX_LOCK, %g5
	SPINLOCK_ENTER(%g5, %g6, %g4)

	STRAND_PUSH(%g7, %g4, %g5)	! preserve return %pc across HVCALL
	HVCALL(sram_ldc_push_data)	!! %g3 (sp endpoint) preserved
	STRAND_POP(%g7, %g5)

	add	%g3, SP_LDC_TX_LOCK, %g5
	SPINLOCK_EXIT(%g5)

	!! %g2	send interrupt flag (set by sram_ldc_push_data)
	!! %g3	sp endpoint

	brz	%g2, 1f		! skip notification if flag is clear
	nop

	! %g3	target endpoint (clobbered)
	LDC_SEND_SP_INTR(%g3, %g1, %g4, SP_LDC_DATA)
1:

	STRAND_POP(%g2, %g4)	! restore pointer

	!! %g2	sender's endpoint

	HVRET
	SET_SIZE(guest_to_sp_tx_set_tail)
1250
1251
1252/*
1253 * sp_to_guest_pull_data
1254 *
1255 * Input:
1256 * %g2 guest (receiver) endpoint (preserved)
1257 *
1258 * Clobbers:
1259 * %g1, %g3-7
1260 *
1261 * Pulls queue data (if available) from the target SP endpoint's TX queue
1262 * into the specified guest endpoint's RX queue.
1263 *
1264 */
	ENTRY_NP(sp_to_guest_pull_data)

	!! %g2	guest endpoint

	! save off our return %pc value
	STRAND_PUSH(%g7, %g3, %g4)

	! Locate the SP endpoint struct for this channel's target.
	ROOT_STRUCT(%g1)
	ldx	[%g1 + CONFIG_SP_LDCS], %g1	! get SP endpoint array
	ldx	[%g2 + LDC_TARGET_CHANNEL], %g4	! and target endpoint
	mulx	%g4, SP_LDC_ENDPOINT_SIZE, %g5
	add	%g1, %g5, %g1			! and its struct

	!! %g1	sp endpoint
	!! %g2	guest endpoint

	! quick check to see whether there are any packets
	! to grab on this channel.

	ldx	[%g1 + SP_LDC_RX_QD_PA], %g3
	ldub	[%g3 + SRAM_LDC_HEAD], %g4
	ldub	[%g3 + SRAM_LDC_TAIL], %g5
	cmp	%g4, %g5
	be	%xcc, 2f		! head == tail -> nothing queued
	nop

	clr	%g6			! "send ACK" flag

	!! %g1	sp endpoint
	!! %g2	guest endpoint
	!! %g6	"send ACK" flag

	add	%g1, SP_LDC_RX_LOCK, %g3	! PA of endpoint lock
	SPINLOCK_ENTER(%g3, %g4, %g5)

	! snapshot queue state into our scratch register area
	! since we will be copying the data in possibly several
	! passes and we want to ensure the guest cannot cause any
	! HV corruption by reconfiguring the queue while we are
	! executing this routine.

	ldx	[%g1 + SP_LDC_RX_QD_PA], %g5
	ldub	[%g5 + SRAM_LDC_HEAD], %g3
	LDC_SRAM_IDX_TO_OFFSET(%g3)
	stw	%g3, [%g1 + SP_LDC_RX_SCR_TXHEAD]	! TX head
	ldub	[%g5 + SRAM_LDC_TAIL], %g3
	LDC_SRAM_IDX_TO_OFFSET(%g3)
	stw	%g3, [%g1 + SP_LDC_RX_SCR_TXTAIL]	! TX tail
	set	(SRAM_LDC_QENTRY_SIZE * SRAM_LDC_ENTRIES_PER_QUEUE), %g3
	stx	%g3, [ %g1 + SP_LDC_RX_SCR_TXSIZE ]	! TX size
#ifdef CONFIG_SPLIT_SRAM
	stx	%g5, [ %g1 + SP_LDC_RX_SCR_TX_QDPA ]	! TX queue data PA
	ldx	[ %g1 + SP_LDC_RX_Q_DATA_PA ], %g5
#endif
	stx	%g5, [ %g1 + SP_LDC_RX_SCR_TX_QPA ]	! TX queue base PA

	!! %g1	SP endpoint
	!! %g2	guest endpoint

	stx	%g2, [%g1 + SP_LDC_RX_SCR_TARGET]

	! Snapshot the guest's RX queue state as well.
	lduw	[%g2 + LDC_RX_QTAIL], %g4
	stw	%g4, [%g1 + SP_LDC_RX_SCR_RXTAIL]	! RX tail
	lduw	[%g2 + LDC_RX_QHEAD], %g4
	stw	%g4, [%g1 + SP_LDC_RX_SCR_RXHEAD]	! RX head
	ldx	[%g2 + LDC_RX_QSIZE], %g4
	stx	%g4, [%g1 + SP_LDC_RX_SCR_RXSIZE]	! RX size
	ldx	[%g2 + LDC_RX_QBASE_PA], %g4
	stx	%g4, [%g1 + SP_LDC_RX_SCR_RX_QPA]	! RX queue base PA

.copy_more_from_sram:

	!! %g1	sp endpoint

	! How many packets are waiting in the SRAM TX queue?
	lduw	[%g1 + SP_LDC_RX_SCR_TXHEAD], %g2
	lduw	[%g1 + SP_LDC_RX_SCR_TXTAIL], %g3
	ldx	[%g1 + SP_LDC_RX_SCR_TXSIZE], %g4
	LDC_QUEUE_DATA_AVAILABLE(%g2, %g3, %g4)
	LDC_SRAM_OFFSET_TO_IDX(%g3)

	!! %g1	sp endpoint
	!! %g2	TX head offset
	!! %g3	packets of data to copy

	! How much room is left in the guest's RX queue?
	lduw	[%g1 + SP_LDC_RX_SCR_RXHEAD], %g4
	lduw	[%g1 + SP_LDC_RX_SCR_RXTAIL], %g5
	ldx	[%g1 + SP_LDC_RX_SCR_RXSIZE], %g7
	LDC_QUEUE_SPACE_AVAILABLE(%g4, %g5, %g7, Q_EL_SIZE)
	LDC_OFFSET_TO_IDX(%g4)

	!! %g1	sp endpoint
	!! %g2	TX head offset
	!! %g3	packets of data to copy
	!! %g4	packets of available space
	!! %g5	RX tail offset

	! find the lesser of the two copy size values
	sub	%g3, %g4, %g7
	movrgez	%g7, %g4, %g3

	! must have at least one LDC packet to copy, otherwise we are done.
	brlez	%g3, .done_copy_from_sram
	nop

	mov	1, %g6			! "send ACK" flag

	!! %g1	sp endpoint
	!! %g2	TX head offset
	!! %g3	packets of data to copy
	!! %g5	RX tail offset

	! Convert the head/tail offsets into physical addresses.
	ldx	[%g1 + SP_LDC_RX_SCR_TX_QPA], %g4
	add	%g2, %g4, %g2		! PA of TX queue data
	ldx	[%g1 + SP_LDC_RX_SCR_RX_QPA], %g4
	add	%g5, %g4, %g5		! PA of RX queue tail

	!! %g1	sp endpoint
	!! %g2	TX head PA
	!! %g3	packets of data to copy
	!! %g5	RX tail PA

	sub	%g3, 1, %g3		! use as loop index
1:
	LDC_COPY_PKT_FROM_SRAM(%g2, %g5, %g4, %g7)
	brgz	%g3, 1b
	dec	%g3			! (delay slot) next packet

	!! %g1	sp endpoint
	!! %g2	new TX head PA
	!! %g5	new RX tail PA

	! Now we need to update our scratchpad head and tail pointers
	ldx	[%g1 + SP_LDC_RX_SCR_TX_QPA], %g7
	sub	%g2, %g7, %g2		! New TX head offset
	ldx	[%g1 + SP_LDC_RX_SCR_TXSIZE], %g7
	cmp	%g2, %g7
	move	%xcc, 0, %g2		! check for wrap around
	stw	%g2, [%g1 + SP_LDC_RX_SCR_TXHEAD]

	ldx	[%g1 + SP_LDC_RX_SCR_RX_QPA], %g7
	sub	%g5, %g7, %g5		! New RX tail offset
	ldx	[%g1 + SP_LDC_RX_SCR_RXSIZE], %g7
	cmp	%g5, %g7
	move	%xcc, 0, %g5		! check for wrap around
	stw	%g5, [%g1 + SP_LDC_RX_SCR_RXTAIL]

	!! %g1	sp endpoint

	ba	.copy_more_from_sram
	nop

.done_copy_from_sram:

	!! %g1	sp endpoint

#ifdef CONFIG_SPLIT_SRAM
	ldx	[ %g1 + SP_LDC_RX_SCR_TX_QDPA ], %g4	! queue data PA
#else
	ldx	[ %g1 + SP_LDC_RX_SCR_TX_QPA ], %g4	! queue base PA
#endif
	lduw	[ %g1 + SP_LDC_RX_SCR_TXHEAD ], %g3
	LDC_SRAM_OFFSET_TO_IDX(%g3)
	stb	%g3, [%g4 + SRAM_LDC_HEAD]	! commit the new TX head

	ldx	[%g1 + SP_LDC_RX_SCR_TARGET], %g2
	lduw	[%g1 + SP_LDC_RX_SCR_RXTAIL], %g5
	stw	%g5, [%g2 + LDC_RX_QTAIL]	! commit the new RX tail

	add	%g1, SP_LDC_RX_LOCK, %g5	! PA of endpoint lock
	SPINLOCK_EXIT(%g5)

	! Send the SP an ACK if we have pulled data from the SRAM
	brz	%g6, 2f
	nop

	mov	%g1, %g6
	! %g6	target endpoint (clobbered)
	LDC_SEND_SP_INTR(%g6, %g3, %g4, SP_LDC_SPACE)

2:
	!! %g1	sender's (sp) endpoint
	!! %g2	our endpoint

	! restore our return %pc value
	STRAND_POP(%g7, %g3)

	HVRET
	SET_SIZE(sp_to_guest_pull_data)
1453
1454/*
1455 * sram_ldc_push_data
1456 *
1457 * Routine to send as much data as possible from a guest's TX queue
1458 * into the corresponding SRAM RX queue.
1459 *
1460 * NOTE: caller must own the SP endpoint TX lock before calling this
1461 * routine
1462 *
1463 * Inputs:
1464 * %g2 guest endpoint (modified)
1465 * %g3 sp endpoint (unmodified)
1466 * %g7 return %pc value (unmodified)
1467 *
1468 * Output:
1469 * %g2 '1' if interrupt notification is required, 0 otherwise.
1470 *
1471 * Clobbers all globals except %g3 and %g7
1472*/
	ENTRY_NP(sram_ldc_push_data)

	!! %g2	guest endpoint
	!! %g3	sp endpoint

	! Snapshot both queues' state into the SP endpoint's scratch
	! area so the guest cannot alter it while we copy.
	lduw	[%g2 + LDC_TX_QTAIL], %g4
	stw	%g4, [%g3 + SP_LDC_TX_SCR_TXTAIL]	! TX tail
	lduw	[%g2 + LDC_TX_QHEAD], %g4
	stw	%g4, [%g3 + SP_LDC_TX_SCR_TXHEAD]	! TX head
	ldx	[%g2 + LDC_TX_QSIZE], %g4
	stx	%g4, [%g3 + SP_LDC_TX_SCR_TXSIZE]	! TX size
	ldx	[%g2 + LDC_TX_QBASE_PA], %g4
	stx	%g4, [%g3 + SP_LDC_TX_SCR_TX_QPA]	! TX q base PA
	ldx	[%g3 + SP_LDC_TX_QD_PA], %g4
	ldub	[%g4 + SRAM_LDC_HEAD], %g5
	LDC_SRAM_IDX_TO_OFFSET(%g5)
	stw	%g5, [%g3 + SP_LDC_TX_SCR_RXHEAD]	! RX head
	ldub	[%g4 + SRAM_LDC_TAIL], %g5
	LDC_SRAM_IDX_TO_OFFSET(%g5)
	stw	%g5, [%g3 + SP_LDC_TX_SCR_RXTAIL]	! RX tail
	set	(SRAM_LDC_QENTRY_SIZE*SRAM_LDC_ENTRIES_PER_QUEUE), %g5
	stx	%g5, [ %g3 + SP_LDC_TX_SCR_RXSIZE ]	! RX size
#ifdef CONFIG_SPLIT_SRAM
	stx	%g4, [ %g3 + SP_LDC_TX_SCR_RX_QDPA ]	! RX qd data PA
	ldx	[ %g3 + SP_LDC_TX_Q_DATA_PA ], %g4
#endif
	stx	%g4, [ %g3 + SP_LDC_TX_SCR_RX_QPA ]	! RX q base PA
	stx	%g2, [ %g3 + SP_LDC_TX_SCR_TARGET ]
1:
	!! %g3	sp endpoint

	! How many packets are waiting in the guest's TX queue?
	lduw	[%g3 + SP_LDC_TX_SCR_TXHEAD], %g4
	lduw	[%g3 + SP_LDC_TX_SCR_TXTAIL], %g6
	ldx	[%g3 + SP_LDC_TX_SCR_TXSIZE], %g2
	LDC_QUEUE_DATA_AVAILABLE(%g4, %g6, %g2)
	LDC_OFFSET_TO_IDX(%g6)

	!! %g3	sp endpoint
	!! %g4	TX head pointer
	!! %g6	packets of data to copy

	! How much room is left in the SRAM RX queue?
	lduw	[%g3 + SP_LDC_TX_SCR_RXHEAD], %g5
	lduw	[%g3 + SP_LDC_TX_SCR_RXTAIL], %g1
	ldx	[%g3 + SP_LDC_TX_SCR_RXSIZE], %g2
	LDC_QUEUE_SPACE_AVAILABLE(%g5, %g1, %g2, SRAM_LDC_QENTRY_SIZE)
	LDC_SRAM_OFFSET_TO_IDX(%g5)

	!! %g1	RX tail pointer
	!! %g3	sp endpoint
	!! %g4	TX head pointer
	!! %g5	packets of space available
	!! %g6	packets of data to copy

	/*
	 * find the lesser of the two copy size values
	 */
	sub	%g6, %g5, %g2
	movrgez	%g2, %g5, %g6

	/*
	 * must have at least one LDC packet to copy,
	 * otherwise we are done.
	 */
	brlez	%g6, 3f
	nop
	ldx	[%g3 + SP_LDC_TX_SCR_TX_QPA], %g5
	add	%g4, %g5, %g4		! PA of TX queue data
	ldx	[%g3 + SP_LDC_TX_SCR_RX_QPA], %g5
	add	%g1, %g5, %g1		! PA of RX queue tail

	!! %g1	RX tail PA
	!! %g3	sp endpoint
	!! %g4	TX head PA
	!! %g6	packets of data to copy
	sub	%g6, 1, %g6		! use as loop index
2:
	LDC_COPY_PKT_TO_SRAM(%g4, %g1, %g5, %g2)	! moves pointers
	brgz	%g6, 2b
	dec	%g6			! (delay slot) next packet

	!! %g1	new RX tail PA
	!! %g3	sp endpoint
	!! %g4	new TX head PA

	/*
	 * Now we need to update our scratchpad head/tail pointers
	 */
	ldx	[%g3 + SP_LDC_TX_SCR_TX_QPA], %g5
	sub	%g4, %g5, %g4		! New TX head offset
	ldx	[%g3 + SP_LDC_TX_SCR_TXSIZE], %g5
	cmp	%g4, %g5
	move	%xcc, 0, %g4		! check for wrap around
	stw	%g4, [%g3 + SP_LDC_TX_SCR_TXHEAD]
	ldx	[%g3 + SP_LDC_TX_SCR_RX_QPA], %g5
	sub	%g1, %g5, %g1		! New RX tail offset
	ldx	[%g3 + SP_LDC_TX_SCR_RXSIZE], %g5
	cmp	%g1, %g5
	move	%xcc, 0, %g1		! check for wrap around
	stw	%g1, [%g3 + SP_LDC_TX_SCR_RXTAIL]

	ba	1b			! try to copy another batch
	nop
3:
	ldx	[%g3 + SP_LDC_TX_SCR_TARGET], %g2

	!! %g2	guest endpoint
	!! %g3	sp endpoint

	/*
	 * Write new TX head and RX tail values and see whether we
	 * need to send the SP notification
	 * We only send notification if the RX queue was empty. The
	 * algorithm we use to avoid missed interrupts is as
	 * follows:
	 *	Read orig RX head (orig_rx_hd)
	 *	Read orig RX tail (orig_rx_tl)
	 *	Write new RX tail value
	 *	Read (possibly) new RX head (new_rx_hd)
	 *	if ((orig_rx_hd==orig_rx_tl)||(new_rx_hd==orig_rx_tl)) {
	 *		notify SP
	 *	}
	 */
	lduw	[ %g3 + SP_LDC_TX_SCR_TXHEAD ], %g6
	stw	%g6, [ %g2 + LDC_TX_QHEAD ]	! commit new TX head
#ifdef CONFIG_SPLIT_SRAM
	ldx	[ %g3 + SP_LDC_TX_SCR_RX_QDPA ], %g4	! queue data PA
#else
	ldx	[ %g3 + SP_LDC_TX_SCR_RX_QPA ], %g4	! queue base PA
#endif
	ldub	[ %g4 + SRAM_LDC_HEAD ], %g5
	LDC_SRAM_IDX_TO_OFFSET(%g5)
	ldub	[%g4 + SRAM_LDC_TAIL], %g1
	LDC_SRAM_IDX_TO_OFFSET(%g1)

	!! %g1	old RX tail
	!! %g3	sp endpoint
	!! %g4	RX queue descriptor
	!! %g5	old RX head

	lduw	[%g3 + SP_LDC_TX_SCR_RXTAIL], %g6
	LDC_SRAM_OFFSET_TO_IDX(%g6)
	stb	%g6, [%g4 + SRAM_LDC_TAIL]	! commit new RX tail
	ldub	[%g4 + SRAM_LDC_HEAD], %g4
	LDC_SRAM_IDX_TO_OFFSET(%g4)

	!! %g1	old RX tail
	!! %g3	sp endpoint
	!! %g4	new RX head
	!! %g5	old RX head
	!! %g6	new RX tail

	clr	%g2
	cmp	%g1, %g5		! (orig_rx_tl == orig_rx_hd) ?
	move	%xcc, 1, %g2
	cmp	%g1, %g4		! (orig_rx_tl == new_rx_hd) ?
	move	%xcc, 1, %g2
	cmp	%g1, %g6		! if old rx tail == new rx tail...
	move	%xcc, %g0, %g2		! ...don't send intr (no data sent)

	!! %g2	send interrupt flag
	!! %g3	sp endpoint

	HVRET
	SET_SIZE(sram_ldc_push_data)
1637
1638
1639#else /* CONFIG_FPGA */
1640
	! Stub used when FPGA (SP channel) support is compiled out.
	ENTRY_NP(sp_to_guest_pull_data)
	! Should never be invoked if this hypervisor is compiled
	! without the FPGA support.
	ba	herr_inval
	nop
	SET_SIZE(sp_to_guest_pull_data)
1647
	! Stub used when FPGA (SP channel) support is compiled out.
	ENTRY_NP(guest_to_sp_tx_set_tail)
	! Should never be invoked if this hypervisor is compiled
	! without the FPGA support.
	ba	herr_inval
	nop
	SET_SIZE(guest_to_sp_tx_set_tail)
1654#endif /* CONFIG_FPGA */
1655
1656
1657 /*
1658 * LDC set map table
1659 * Binds the identified table with the given LDC
1660 *
1661 * int ldc_set_map_table(uint64_t channel, uint64_t table_ra,
1662 * uint64_t table_entries);
1663 *
1664 * %o0 channel
1665 * %o1 table_ra (0 disables mapping for given channel)
1666 * %o2 table_entries
1667 *
1668 * EINVAL - illegal map table ra
1669 * ECHANNEL - illegal channel
1670 *
1671 */
	ENTRY_NP(hcall_ldc_set_map_table)

	! Map table base RA must be 64-byte aligned
	btst	(64 - 1), %o1
	bnz,pn	%xcc, herr_badalign	! base addr not aligned ?
	nop

	! Is Channel legit ?
	GUEST_STRUCT(%g1)
	set	GUEST_LDC_MAX_CHANNEL_IDX, %g2
	ldx	[%g1 + %g2], %g2
	cmp	%o0, %g2
	bgeu,pn	%xcc, herr_invalchan
	nop

	!! %g1	guest struct

	! Locate this channel's endpoint struct within the guest.
	mulx	%o0, LDC_ENDPOINT_SIZE, %g2
	set	GUEST_LDC_ENDPOINT, %g3
	add	%g2, %g3, %g2
	add	%g2, %g1, %g2
	ldub	[%g2 + LDC_IS_LIVE], %g3
	brz,pn	%g3, herr_invalchan
	ldub	[%g2 + LDC_IS_PRIVATE], %g3	! (delay slot) private channels
	brnz,pn	%g3, herr_invalchan		! are not guest-accessible
	nop

	brz,pn	%o2, 1f		! size of 0 unconfigures table
	nop

	cmp	%o2, LDC_MIN_MAP_TABLE_ENTRIES	! Table smaller than min size
	blt,pn	%xcc, herr_inval
	nop

	set	LDC_MAX_MAP_TABLE_ENTRIES, %g4	! Table size bigger than
	cmp	%o2, %g4			! largest index we can store?
	bge,pn	%xcc, herr_inval		! invalid size
	nop

	sub	%o2, 1, %g4		! Table size is not a ^2
	andcc	%o2, %g4, %g0
	bne,pn	%xcc, herr_inval
	nop

	sllx	%o2, LDC_MTE_SHIFT, %g4	! convert #entries to bytes
	sub	%g4, 1, %g5

	! Verify the table lies entirely within the guest's real
	! memory; the corresponding PA comes back in %g5.
	RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, herr_noraddr, %g6, %g5)

	stx	%o1, [%g2 + LDC_MAP_TABLE_RA]
	stx	%g5, [%g2 + LDC_MAP_TABLE_PA]
	stx	%o2, [%g2 + LDC_MAP_TABLE_NENTRIES]
	stx	%g4, [%g2 + LDC_MAP_TABLE_SZ]	! set last - tbl configured

	HCALL_RET(EOK)


1:
	! Remove the map table
	stx	%g0, [%g2 + LDC_MAP_TABLE_SZ]	! set first - tbl unconfigured
	stx	%g0, [%g2 + LDC_MAP_TABLE_NENTRIES]
	stx	%g0, [%g2 + LDC_MAP_TABLE_RA]
	stx	%g0, [%g2 + LDC_MAP_TABLE_PA]

	HCALL_RET(EOK)

	SET_SIZE(hcall_ldc_set_map_table)
1738
1739
1740/*
1741 * LDC get map table
 * Returns the map table info for the given channel number
1743 *
1744 * int ldc_get_map_table(uint64_t channel);
1745 *
1746 * %o0 channel
1747 *
1748 * %o0 status
1749 * %o1 base ra
1750 * %o2 num entries
1751 *
1752 * ECHANNEL - illegal channel
1753 *
1754 */
	ENTRY_NP(hcall_ldc_get_map_table)

	! Is Channel legit ?
	GUEST_STRUCT(%g1)
	set	GUEST_LDC_MAX_CHANNEL_IDX, %g2
	ldx	[%g1 + %g2], %g2
	cmp	%o0, %g2
	bgeu,pn	%xcc, herr_invalchan
	nop

	!! %g1	guest struct

	! Locate this channel's endpoint struct within the guest.
	mulx	%o0, LDC_ENDPOINT_SIZE, %g2
	set	GUEST_LDC_ENDPOINT, %g3
	add	%g2, %g3, %g2
	add	%g2, %g1, %g2
	ldub	[%g2 + LDC_IS_LIVE], %g3
	brz,pn	%g3, herr_invalchan
	ldub	[%g2 + LDC_IS_PRIVATE], %g3	! (delay slot) private channels
	brnz,pn	%g3, herr_invalchan		! are not guest-accessible
	nop

	! Return the currently-bound table parameters.
	ldx	[%g2 + LDC_MAP_TABLE_NENTRIES], %o2
	ldx	[%g2 + LDC_MAP_TABLE_RA], %o1

	HCALL_RET(EOK)

	SET_SIZE(hcall_ldc_get_map_table)
1783
1784
1785
1786/*
1787 * Copy in/out the data from the given cookie_addr
1788 * for length bytes (multiple of 8) to/from the
1789 * real address given.
1790 * flags=0 for copyin (remote cookie buffer to local real),
1791 * flags=1 for copyout (local real to remote cookie buffer)
1792 * For EOK actual length copied is returned.
1793 *
1794 * int ldc_copy(ldc_channel_t chan,
1795 * uint64_t flags,
1796 * uint64_t cookie_addr,
1797 * uint64_t raddr,
1798 * uint64_t length,
1799 * uint64_t * lengthp);
1800 *
1801 * %o0 channel
1802 * %o1 flags
1803 * %o2 cookieaddr
1804 * %o3 raddr
1805 * %o4 length
1806 *
1807 * On EOK length copied is in %o1
1808 *
1809 * ECHANNEL - illegal channel
1810 * ENOMAP - illegal / invalid cookie addr
1811 * ENORADDR - illegal raddr to raddr+length
1812 * EBADALIGN - badly aligned raddr or cookie_addr or length
1813 * EINVAL - illegal flags etc., no map table assigned
1814 * EBADPGSZ - page size does not match
1815 *
1816 *
1817 * FIXME: Items to clean up with this block are:
1818 * 1. Careful access to a MTE mapping with a quadd load
1819 * 2. Better bcopy loop / legion version ..
1820 * 2b. Possibly restrict alignment to either block or page
1821 * size to enable allocating block stores.
1822 * 3. Enable tracking of in progress copies so
 *	mapping tables can be demapped or allocated to other
1824 * channels
1825 * 4. Reference count for channels using these tables.
1826 */
	ENTRY_NP(hcall_ldc_copy)

	! Enforce 8 byte alignment early
	! (cookie addr, raddr and length must all be 8-byte multiples)
	or	%o2, %o3, %g1
	or	%g1, %o4, %g1
	andcc	%g1, 7, %g0
	bne	herr_badalign
	nop

	! Copy direction is either 0 or 1 : in or out
	cmp	%o1, LDC_COPY_OUT
	bg,pn	%xcc, herr_inval
	nop

	! length <=0 error
	cmp	%o4, 0
	ble,pn	%xcc, herr_inval
	nop

	! ch_id, scr, guest, endpoint
	GET_LDC_ENDPOINT(%o0, %g3, %g1, %g7)

	!! %g1	guest struct
	!! %g7	endpoint

	! Check the RA range we've been given
	! hstruct, raddr, size, fail_label, scr
	RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o3, %o4, herr_noraddr, %g4, %g3)

	! Check endpoint connection type
	ldub	[%g7 + LDC_TARGET_TYPE], %g3	! if type=0, target is guest
	brz,pt	%g3, .guest_copy		! else copying directly
	nop					! to a HV PA (hvctl)

	! Copy data from HV memory to guest RA
	!! %o2	PA of address in HV
	! no need for table lookup

	! FIXME: we need a way to verify if the PA is
	! valid.
	! Must check against the currently valid ranges of phys mem

	! Limit length to the end of a page

	sethi	%hi(8192), %g4
	sub	%g4, 1, %g5
	and	%o2, %g5, %g5		! offset into remote page

	sub	%g4, %g5, %g4		! distance to end of page
	cmp	%g4, %o4		! distance <= length
	movl	%xcc, %g4, %o4		! clamp to end of page

	ba,pt	%xcc, .copy_data
	nop

.guest_copy:
	! Find the corresponding endpoint at the recipient ..
	ldx	[%g7 + LDC_TARGET_GUEST], %g2
	ldx	[%g7 + LDC_TARGET_CHANNEL], %g5
	mulx	%g5, LDC_ENDPOINT_SIZE, %g5
	set	GUEST_LDC_ENDPOINT, %g6
	add	%g2, %g5, %g4
	add	%g4, %g6, %g4		! g4 is the target endpoint
	ldub	[%g4 + LDC_IS_LIVE], %g3
	brz,pn	%g3, herr_invalchan
	ldub	[%g4 + LDC_IS_PRIVATE], %g3	! (delay slot)
	brnz,pn	%g3, herr_invalchan
	nop

	! Find our map table PA from endpoint
	ldx	[%g4 + LDC_MAP_TABLE_PA], %g3
	brz,pn	%g3, herr_nomap
	nop

	!! %g1	guest
	!! %g2	target guest
	!! %g3	map table pa
	!! %g4	ldc struct

	srlx	%o2, 60, %g6		! extract page size
	brnz,pn	%g6, herr_badpgsz	! only 8K for now
	nop

	sllx	%o2, 8, %g6		! Extract cookie idx
	srlx	%g6, 13+8, %g6		! Bits: 56-pg_size_bits

	ldx	[%g4 + LDC_MAP_TABLE_NENTRIES], %g5	! table entries
	cmp	%g6, %g5
	bge,pn	%xcc, herr_nomap	! off end of table ?
	nop

	sllx	%g6, LDC_MTE_SHIFT, %g6	! Size of MTE
	ldx	[%g3 + %g6], %g3	! MTE itself

	! Check the MTE grants the requested direction: the copy-in /
	! copy-out permission bits sit next to each other, so %o1
	! (0 or 1) selects the correct one.
	srlx	%g3, LDC_MTE_PERM_CPRD_BIT, %g5
	srlx	%g5, %o1, %g5
	andcc	%g5, 1, %g0
	bz,pn	%xcc, herr_noaccess	! error for invalid MTE? FIXME
	nop

	mov	1, %g5
	sllx	%g5, 56, %g6
	sub	%g6, 1, %g6

	sllx	%g5, 13, %g5		! Currently assume 8K pages
	sub	%g5, 1, %g5

	andn	%g6, %g5, %g6		! Create a Rpfn mask
	and	%g3, %g6, %g3		! Extract target real pfn
	and	%o2, %g5, %g5		! Extract page offset
	or	%g3, %g5, %g3		! Target RA

	! Limit the copy to the map page size
	add	%g5, %o4, %g4		! Length of copy + offset
	sethi	%hi(8192), %g6
	cmp	%g6, %g4
	sub	%g6, %g5, %g6
	movl	%xcc, %g6, %o4		! Limit copy to end of page


	! Check that we are in range
	! get the PA for the exported page
	RA2PA_RANGE_CONV_UNK_SIZE(%g2, %g3, %o4, herr_noraddr, %g6, %o2)

	! Copy the data from one page to another
	!! %o1	direction of copy
	!! %o2	phys addr of remote buffer
	!! %o3	checked RA of local buffer
	!! %o4	copy length
	!! %g1	guest struct
.copy_data:
	RA2PA_CONV(%g1, %o3, %g5, %g6)

	!! %g5	PA of local buffer

	sethi	%hi(8192), %g2
	sub	%g2, 1, %g3

	! Clamp if closer to end of local page than remote buffer
	and	%g5, %g3, %g3
	sub	%g2, %g3, %g3		! Number of bytes to end of page
	cmp	%g3, %o4		! if < copy len, clamp copy len
	movl	%xcc, %g3, %o4

	mov	%g0, %g1		! copy idx

	! See if it is a LDC_COPY_IN
	brz,pn	%o1, 0f
	nop


	! FIXME: Use an optimized block allocating copy !
	! Copy out (local PA -> remote PA), 8 bytes per iteration
1:
	ldx	[%g5 + %g1], %g7
	stx	%g7, [%o2 + %g1]
	add	%g1, 8, %g1
	cmp	%g1, %o4
	bne,pt	%xcc, 1b
	nop

	ba,pt	%xcc, 2f
	nop

0:	! Copy in (remote PA -> local PA), 8 bytes per iteration
1:
	ldx	[%o2 + %g1], %g7
	stx	%g7, [%g5 + %g1]
	add	%g1, 8, %g1
	cmp	%g1, %o4
	bne,pt	%xcc, 1b
	nop

2:

	! Cleanup before return: report bytes copied, scrub the rest
	mov	%o4, %o1
	mov	%g0, %o2
	mov	%g0, %o3
	mov	%g0, %o4

	HCALL_RET(EOK)
	SET_SIZE(hcall_ldc_copy)
2010
2011
2012/*
2013 * Map in function ...
2014 *
2015 * Allocate hypervisor map table entry for given cookie
2016 * so that we can track usage model.
2017 *
2018 * Returns a RA that identifies the tracking slot.
2019 *
2020 * int hv_ldc_mapin(int channel, uint64_t cookie)
2021 *
2022 * inputs:
2023 * %o0 channel
2024 * %o1 cookie
2025 *
2026 * returns:
2027 * %o0 status
2028 * %o1 raddr
2029 * %o2 perms
2030 *
2031 * ECHANNEL - illegal channel
2032 * ENOMAP - illegal / invalid cookie addr
2033 * ENORADDR - illegal raddr to raddr+length
2034 * EBADALIGN - badly aligned raddr or cookie_addr or length
2035 * EINVAL - illegal flags etc., no map table assigned
2036 * EBADPGSZ - page size does not match
2037 */
	ENTRY_NP(hcall_ldc_mapin)

	! Stash the args into the cpu struct scratch area
	! so we can retrieve later (7 g regs aren't enough)
	! FIXME: could use cpu push/pop
	VCPU_STRUCT(%g1)
	stx	%o0, [%g1 + CPU_SCR0]
	stx	%o1, [%g1 + CPU_SCR1]

	GET_LDC_ENDPOINT( %o0, %g7, %g1, %g2 )	!! %g1=guest, %g2=endpoint

	! FIXME: Workaround to disable mapin support if
	! HV MD does not have RA range in guest MDs
	! to be deleted later ...
	set	GUEST_LDC_MAPIN_BASERA, %g3
	ldx	[%g1 + %g3], %g3
	brz,pn	%g3, herr_notsupported
	nop


	ldub	[%g2 + LDC_TARGET_TYPE], %g3	! if type=0, target is guest
	brnz,pt	%g3, herr_inval			! we're ok - fail HV
	nop					! connected channels

	! Find the target endpoint ...
	ldx	[%g2 + LDC_TARGET_GUEST], %g3
	ldx	[%g2 + LDC_TARGET_CHANNEL], %g4
	mulx	%g4, LDC_ENDPOINT_SIZE, %g4
	set	GUEST_LDC_ENDPOINT, %g5
	add	%g3, %g5, %g5
	add	%g5, %g4, %g4		! g4 is the target endpoint
	ldub	[%g4 + LDC_IS_LIVE], %g5
	brz,pn	%g5, herr_invalchan
	ldub	[%g4 + LDC_IS_PRIVATE], %g5	! (delay slot)
	brnz,pn	%g5, herr_invalchan
	nop

	!! %g1	my guest
	!! %g3	target guest
	!! %g4	target endpoint

	! Find table index after page size
	srlx	%o1, LDC_COOKIE_PGSZC_SHIFT, %g2	! extract pg size

	! If its not a valid page size assume cookie is bogus
	set	TTE_VALIDSIZEARRAY, %g5
	srlx	%g5, %g2, %g5
	btst	1, %g5
	bz,pn	%xcc, herr_badpgsz
	nop


	sllx	%o1, 64-LDC_COOKIE_PGSZC_SHIFT, %g6	! shift off pg size
	mulx	%g2, 3, %g5
	add	%g5, 13 + (64-LDC_COOKIE_PGSZC_SHIFT), %g5	! shift for index
	srlx	%g6, %g5, %g5		! get index

	!! %g1	my guest
	!! %g2	page_size
	!! %g3	target guest
	!! %g4	target endpoint
	!! %g5	table idx

	! Check index to see if it is in range
	ldx	[%g4 + LDC_MAP_TABLE_NENTRIES], %g6
	cmp	%g5, %g6
	bge,pn	%xcc, herr_nomap
	nop

	! Find remote map table PA from endpoint
	ldx	[%g4 + LDC_MAP_TABLE_PA], %g6
	brz,pn	%g6, herr_nomap
	nop

	!! %g1	my guest struct
	!! %g2	page_size
	!! %g3	target guest
	!! %g4	target endpoint
	!! %g5	table idx
	!! %g6	maptable pa

	! Pull map table entry
	sllx	%g5, LDC_MTE_SHIFT, %g7
	ldx	[%g6 + %g7], %g7

	! Check we have permission for something ... (ie valid)
	! We ignore the copyin/copyout flags
	srlx	%g7, LDC_MTE_PERM_SHIFT, %o0
	and	%o0, LDC_MTE_PERM_MASK, %o0
	andcc	%o0, LDC_MAPIN_MASK, %g0
	beq,pn	%xcc, herr_noaccess
	nop

	! NOTE: We already checked the cookie against the list of
	! legit page sizes, so if this matches we dont need
	! to check if MTE page size is legal.
	! Match page size ....

	srlx	%g7, LDC_MTE_PGSZ_SHIFT, %o0
	and	%o0, LDC_MTE_PGSZ_MASK, %o0
	xorcc	%o0, %g2, %g0
	bne,pn	%xcc, herr_badpgsz
	nop

	!! %g1	my guest struct
	!! %g2	page_size
	!! %g3	target guest
	!! %g4	target endpoint
	!! %g5	table idx
	!! %g6	maptable pa
	!! %g7	maptable entry

	! sacrifice %g2 and %g4 in here...

	! Check if the entry has a legit RA range for the
	! other guest

	mulx	%g2, 3, %o0		! page size bits = 13 + 3*pgszc
	add	%o0, 13, %o0
	mov	1, %o1
	sllx	%o1, %o0, %o1
	sub	%o1, 1, %o1		!! %o1 size of page

	! Extract the RA & check for alignment
	sllx	%g7, LDC_MTE_RSVD_BITS, %g2
	srlx	%g2, 13+LDC_MTE_RSVD_BITS, %g2
	sllx	%g2, 13, %o0
	btst	%o0, %o1
	bne,pn	%xcc, herr_badalign	! page not aligned
	nop

	! Is this a legit RA for the other guest
	RA2PA_RANGE_CONV_UNK_SIZE(%g3, %o0, %o1, herr_noraddr, %g4, %g2)

	! Finally everything checks out
	! Let's allocate a mapin entry - fill in the details
	! and return.

	! Picking a mapin entry has to be atomic in case we're in
	! a race with another map-in

	set	GUEST_LDC_MAPIN_FREE_IDX, %o0
	add	%g1, %o0, %o0
1:
	ldx	[%o0], %o1		! -1 == No more free available
	brlz	%o1, herr_toomany
	nop

	mulx	%o1, LDC_MAPIN_SIZE, %g2	! Extract next idx from free
	set	GUEST_LDC_MAPIN, %g4
	add	%g1, %g4, %g4
	add	%g4, %g2, %g2
	ldx	[%g2 + LDC_MI_NEXT_IDX], %g4

	! Atomically pop the entry off the free list; retry on race.
	casxa	[%o0]ASI_P, %o1, %g4
	cmp	%g4, %o1
	bne,pn	%xcc, 1b
	nop

	! Fill in the mapin entry values
	!
	!! %o1	index of mapin entry
	!! %g1	my guest struct
	!! %g2	address of mapin entry
	!! %g3	target guest
	!! %g5	table idx
	!! %g6	maptable pa
	!! %g7	maptable entry

	! Stash away remaining O regs for space.

	VCPU_STRUCT(%o0)
	ldx	[%o0 + CPU_SCR0], %g4
	sth	%g4, [%g2 + LDC_MI_LOCAL_ENDPOINT]

	stw	%g5, [%g2 + LDC_MI_MAP_TABLE_IDX]

	! g4 and g5 now available

	srlx	%g7, LDC_MTE_PERM_SHIFT, %g4
	and	%g4, LDC_MTE_PERM_MASK, %o2	! Return perms
	and	%g4, LDC_MAPIN_MASK, %g4
	stb	%g4, [%g2 + LDC_MI_PERMS]

	! Extract the RA again
	sllx	%g7, LDC_MTE_RSVD_BITS, %g4
	srlx	%g4, 13+LDC_MTE_RSVD_BITS, %g4
	sllx	%g4, 13, %g4

	RA2PA_CONV(%g3, %g4, %g4, %g5)

	! RA was already checked for alignment, so
	! PA must also be aligned - no check required
	stx	%g4, [%g2 + LDC_MI_PA]

	! Extract the page size again
	srlx	%g7, LDC_MTE_PGSZ_SHIFT, %g4
	and	%g4, LDC_MTE_PGSZ_MASK, %g4
	stb	%g4, [%g2 + LDC_MI_PG_SIZE]

	! Use it to figure out the RA offset from the
	! cookie we were given.

	mulx	%g4, 3, %g4
	add	%g4, 13, %g4
	mov	1, %o0
	sllx	%o0, %g4, %o0
	sub	%o0, 1, %o0		! %o0 = page offset mask

	! Clear everything else
	stx	%g0, [%g2 + LDC_MI_VA]
	sth	%g0, [%g2 + LDC_MI_VA_CTX]
	stx	%g0, [%g2 + LDC_MI_IO_VA]
	stx	%g0, [%g2 + LDC_MI_MMU_MAP]

#if (LARGEST_PG_SIZE_BITS+LDC_NUM_MAPINS_BITS) > 55
#error Sanity check failed: too many mapin entries to encode in RA
#endif
	! Encode the returned RA as: mapin base RA + (entry idx <<
	! LARGEST_PG_SIZE_BITS) + page offset from the cookie.
	sllx	%o1, LARGEST_PG_SIZE_BITS, %o1
	set	GUEST_LDC_MAPIN_BASERA, %g4
	ldx	[%g1 + %g4], %g4
	add	%g4, %o1, %o1

	! scrap g1 and replace with cpu struct
	VCPU_STRUCT(%g1)
	ldx	[%g1 + CPU_SCR1], %g1

	and	%g1, %o0, %o0		! offset within page (from cookie)
	or	%o0, %o1, %o1

	HCALL_RET(EOK)
	SET_SIZE(hcall_ldc_mapin)
2270
2271 /*
2272 * Simple support function to release and clear a mapin entry
2273 * that is no longer in use
2274 *
2275 * %g1 guest struct
2276 * %g2 index of mapin entry
2277 * %g3 scratch
2278 * %g4 scratch
2279 * %g5 scratch
2280 * %g7 return addr
2281 */
2282
	ENTRY_NP(mapin_free)
	! %g3 = &guest->ldc_mapin_free_idx (head of the free list)
	set	GUEST_LDC_MAPIN_FREE_IDX, %g3
	add	%g1, %g3, %g3			! Address of free idx

	! %g1 = &guest->ldc_mapin[%g2] (entry being released)
	set	GUEST_LDC_MAPIN, %g4
	add	%g1, %g4, %g4
	mulx	%g2, LDC_MAPIN_SIZE, %g1
	add	%g1, %g4, %g1			! Address of mapin entry

	! Perms are used to determine liveness; zero marks the entry dead
	! before it becomes reachable through the free list.
	stb	%g0, [%g1 + LDC_MI_PERMS]

1:
	! Lock-free push onto the free list: link entry->next to the
	! current head, then CAS the head from the observed value to this
	! entry's index.  If another strand changed the head meanwhile the
	! CAS returns the new head and we retry.
	ldx	[%g3], %g4
	stx	%g4, [%g1 + LDC_MI_NEXT_IDX]	! do first so link-in is atomic

	mov	%g2, %g5
	casxa	[%g3]ASI_P, %g4, %g5
	cmp	%g5, %g4
	bne,pn	%xcc, 1b
	nop

	HVRET
	SET_SIZE(mapin_free)
2307
2308
2309/*
2310 * callback for console input
2311 *
2312 * %g1 callback arg (guest struct)
2313 * %g2 payload
2314 * %g7 return address
2315 */
	ENTRY_NP(cons_ldc_callback)

	! %g1 = &guest->console for this endpoint
	set	GUEST_CONSOLE, %g5
	add	%g1, %g5, %g1

	! Drop everything if the console is not marked ready.
	ldub	[%g1 + CONS_STATUS], %g4	! chk if console is ready
	andcc	%g4, LDC_CONS_READY, %g0
	bz,pn	%xcc, 1f
	nop

	! Dispatch on packet type: DATA is queued, CONTROL is decoded,
	! anything else is silently dropped.
	ldub	[%g2], %g6			! get the packet type
	cmp	%g6, LDC_CONSOLE_DATA
	beq,pt	%xcc, .console_data
	nop

	cmp	%g6, LDC_CONSOLE_CONTROL	! check if control pkt
	bne,pt	%xcc, 1f			! else drop pkt and return
	nop

	! CONS_BREAK control message: set the break bit in the status.
	! The annulled beq,a stores the updated status only when the
	! message matches; otherwise the delay-slot stb is skipped.
	ldub	[%g1 + CONS_STATUS], %g4	! get console status
	or	%g4, LDC_CONS_BREAK, %g4	! set the break bit

	lduw	[%g2 + LDC_CONS_CTRL_MSG], %g6	! get control message
	set	CONS_BREAK, %g5
	cmp	%g6, %g5			! chk it is a break
	beq,a,pn %xcc, 1f
	stb	%g4, [%g1 + CONS_STATUS]	! (delay slot, only if taken)

	! CONS_HUP control message: same pattern with the hangup bit.
	ldub	[%g1 + CONS_STATUS], %g4	! get console status
	or	%g4, LDC_CONS_HUP, %g4		! set the hup bit

	lduw	[%g2 + LDC_CONS_CTRL_MSG], %g6	! get control message
	set	CONS_HUP, %g5
	cmp	%g6, %g5			! chk it is a hangup
	beq,a,pn %xcc, 1f
	stb	%g4, [%g1 + CONS_STATUS]	! (delay slot, only if taken)

	ba	1f				! invalid control message
	nop					! drop it

.console_data:
	! Byte-copy the payload into the console input ring buffer.
	! NOTE(review): no check that the ring has room - the tail can
	! overrun the head and overwrite unread input; confirm intended.
	ldx	[%g1 + CONS_INTAIL], %g4	! get current tail
	ldub	[%g2 + LDC_CONS_SIZE], %g3	! get num chars
	add	%g2, LDC_CONS_PAYLOAD, %g2	! start from second word
2:
	add	%g1, CONS_INBUF, %g5		! incoming buffer
	add	%g5, %g4, %g5			! dest buf offset loc
	ldub	[%g2], %g6
	stb	%g6, [%g5]
	inc	%g2				! inc src addr
	inc	%g4				! inc inbuf tail
	and	%g4, (CONS_INBUF_SIZE - 1), %g4	! and wrap (size is power of 2)
	deccc	%g3				! dec size
	bnz,pn	%xcc, 2b			! if not zero, copy next byte
	nop
	stx	%g4, [%g1 + CONS_INTAIL]	! store the new tail

1:
	! Return to caller (callback convention: return addr in %g7).
	jmp	%g7 + 4
	nop

	SET_SIZE(cons_ldc_callback)
2379
2380
2381
2382 ! Real address access
2383 !! %g1 guest struct
2384 !! %g2 real address to be mapped
2385 !! %g3 mapin base RA
2386 !! %g4 offset in mapin RA region
2387
2388 ! XXX - Need to check that bits between LARGEST_PG_SIZE_BITS and
	! actual page size are zero to prevent aliasing.
2390
	ENTRY_NP(ldc_dmmu_mapin_ra)
	.global rdmmu_miss_not_found2
	! Look up the mapin entry for this offset; an entry with no R/W
	! permission is treated as not mapped and revectors to the normal
	! RA-miss-not-found path.
	GET_MAPIN_ENTRY(%g1, %g4, %g5)
	ldub	[%g5 + LDC_MI_PERMS], %g6
	andcc	%g6, LDC_MAP_R|LDC_MAP_W, %g0
	beq,pn	%xcc, rdmmu_miss_not_found2
	nop

	! OK have a mapable RA with some permissions
	! stuff the DTLB with the right info.
	! FIXME: cant support write only with N1s TLB
	! Build the TTE: PA | pgsize, W iff the entry has write permission
	! (movne fires because andcc left Z clear), plus CP|P and the
	! valid bit (bit 63).
	ldub	[%g5 + LDC_MI_PG_SIZE], %g4
	ldx	[%g5 + LDC_MI_PA], %g3
	or	%g4, %g3, %g3
	andcc	%g6, LDC_MAP_W, %g0

	mov	0, %g6
	movne	%xcc, TTE_W, %g6
	or	%g6, TTE_CP|TTE_P, %g6		! TTE_CP wont fit cmov

	or	%g6, %g3, %g3
	mov	1, %g6
	sllx	%g6, 63, %g6			! valid bit
	or	%g6, %g3, %g3

	! TAG register is still configured for us
	mov	TLB_IN_REAL|TLB_IN_4V_FORMAT, %g2
	stxa	%g3, [%g2]ASI_DTLB_DATA_IN

	! Now the expensive bit - track the MMU usage: atomically set this
	! strand's bit (offset by MIE_RA_MMU_SHIFT) in the entry's MMU-map
	! word so a later revoke/demap knows which MMUs hold the mapping.
	! NOTE(review): STRAND_ID is shifted right by 2 before use as the
	! bit index - presumably a per-core rather than per-strand bit;
	! confirm against MIE_RA_MMU_SHIFT's definition.
	STRAND_STRUCT(%g1)
	ldub	[%g1 + STRAND_ID], %g1		/* FIXME: use asr26? */
	srlx	%g1, 2, %g1
	add	%g1, MIE_RA_MMU_SHIFT, %g1
	mov	1, %g2
	sllx	%g2, %g1, %g1
	add	%g5, LDC_MI_MMU_MAP, %g5	!!!
	ATOMIC_OR_64(%g5, %g1, %g2, %g3)

	retry
	SET_SIZE(ldc_dmmu_mapin_ra)
2432
2433
2434 !
2435 ! FIXME: need equivalent for immu of dmmu_mapin_ra
2436 !
2437
2438
2439 !! %g1 cpu struct
2440 !! %g2 --
2441 !! %g3 raddr
2442 !! %g4 page size (bytes)
2443 !! %g5 offset into mapin region
2444 !! %g6 guest struct
2445 !! %g7 TTE ready for pa
2446 !
2447 ! FIXME: need to cross leverage with ldc_dmmu_mapin_ra
2448 ! FIXME: Need to check that bits between LARGEST_PG_SIZE_BITS and
	! actual page size are zero to prevent aliasing.
2450
	ENTRY_NP(ldc_dtsb_hit)
	.global revec_dax

	GET_MAPIN_ENTRY(%g6, %g5, %g2)

	! Entry must be live with at least R or W permission, else report
	! an invalid RA back to the guest.
	ldub	[%g2 + LDC_MI_PERMS], %g6
	andcc	%g6, LDC_MAP_R|LDC_MAP_W, %g0
	beq,pn	%xcc, .inval_ra
	nop

	! OK have a mapable RA with some permissions
	! stuff the DTLB with the right info.
	! FIXME: cant support write only with N1s TLB

	! Fail page size mis-match otherwise our demap doesnt work
	and	%g7, TTE_SZ_MASK, %g4
	ldub	[%g2 + LDC_MI_PG_SIZE], %g5
	cmp	%g4, %g5
	bne,pn	%xcc, .inval_pgsz
	nop

	! Merge the entry's PA into the guest's TTE.
	ldx	[%g2 + LDC_MI_PA], %g5
	or	%g7, %g5, %g5

	! Clear the W bit when the entry lacks write permission:
	! move-on-equal puts TTE_W in %g6 only when LDC_MAP_W is absent
	! (Z set), so the andn strips W exactly in that case.
	andcc	%g6, LDC_MAP_W, %g0
	mov	0, %g6
	move	%xcc, TTE_W, %g6

	andn	%g5, %g6, %g5			! clear w bit if no write permission

	CLEAR_TTE_LOCK_BIT(%g5, %g6)		! %g5 tte (force clear lock bit)

	! TAG register is still configured for us
	mov	TLB_IN_4V_FORMAT, %g6
	stxa	%g5, [%g6]ASI_DTLB_DATA_IN

	! Pull the fault address and context, save it in the mapin entry
	! so a later unmap/demap can find the VA side of this mapping.
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_DMMU, %g3
	set	(NCTXS-1), %g4
	and	%g3, %g4, %g5			! context
	sth	%g5, [%g2 + LDC_MI_VA_CTX]
	andn	%g3, %g4, %g3			! vaddr
	stx	%g3, [%g2 + LDC_MI_VA]

	! Now the expensive bit - track the MMU usage: set this strand's
	! VA-side bit in the entry's MMU-map word.
	VCPU2STRAND_STRUCT(%g1, %g1)
	ldub	[%g1 + STRAND_ID], %g1
	srlx	%g1, 2, %g1
	add	%g1, MIE_VA_MMU_SHIFT, %g1
	mov	1, %g5
	sllx	%g5, %g1, %g1
	add	%g2, LDC_MI_MMU_MAP, %g2
	ATOMIC_OR_64(%g2, %g1, %g5, %g3)

	retry

.inval_pgsz:
	! Page size of the guest TTE disagrees with the mapin entry.
	ba,pt	%xcc, .revec
	mov	MMU_FT_PAGESIZE, %g1		! (delay slot) fault type

.inval_ra:
	ba,pt	%xcc, .revec
	mov	MMU_FT_INVALIDRA, %g1		! (delay slot) fault type

.revec:
	! Pull the fault address and context again, then revector to the
	! common data-access-exception delivery path with
	! %g1 = fault type, %g3 = vaddr, %g5 = context.
	mov	MMU_TAG_ACCESS, %g3
	ldxa	[%g3]ASI_DMMU, %g3
	set	(NCTXS-1), %g2
	and	%g3, %g2, %g5			! context
	andn	%g3, %g2, %g3			! addr
	ba,pt	%xcc, revec_dax
	nop

	SET_SIZE(ldc_dtsb_hit)
2527
2528 !! %g1 cpu struct
2529 !! %g2 real address
2530 !! %g3 TTE without PA/RA field
2531 !! %g4 --
2532 !! %g5 offset into mapin region
2533 !! %g6 guest struct
2534 !! %g7 --
2535 !! %o0 vaddr
2536 !! %o1 ctx
2537 !! %o2 tte
2538 !! %o3 flags
2539
2540 !
2541 ! FIXME: need to cross leverage with ldc_dmmu_mapin_ra
2542 ! FIXME: Need to check that bits between LARGEST_PG_SIZE_BITS and
	! actual page size are zero to prevent aliasing.
2544
2545 ENTRY_NP(ldc_map_addr_api)
2546 .global hcall_mmu_map_addr_ra_not_found
2547
2548 GET_MAPIN_ENTRY(%g6, %g5, %g2)
2549
2550 ! If we ask for an I mapping, make sure we have MAP_X
2551 ! If we ask for a D mapping, make sure we have at least MAP_R
2552 ! .. we require MAP_W if the TTE has WPERM and we ask for D mapping
2553
2554 btst MAP_DTLB, %o3
2555 movne %xcc, LDC_MAP_R, %g4
2556 btst MAP_ITLB, %o3
2557 movne %xcc, LDC_MAP_X, %g7
2558 or %g4, %g7, %g4
2559 btst TTE_W, %g3
2560 movne %xcc, LDC_MAP_W, %g7
2561 or %g4, %g7, %g7
2562
2563 ldub [%g2 + LDC_MI_PERMS], %g4
2564 and %g4, %g7, %g4
2565 brz,pn %g4, herr_inval
2566 nop
2567
2568 ldub [%g2 + LDC_MI_VA_MMU_MAP], %g7
2569 brz,pt %g7, 1f
2570 nop
2571 ldx [%g2 + LDC_MI_VA], %g7
2572 cmp %g7, %o0
2573 bne,pn %xcc, herr_inval
2574 nop
2575 lduh [%g2 + LDC_MI_VA_CTX], %g7
2576 cmp %g7, %o1
2577 bne,pn %xcc, herr_inval
2578 nop
25791:
2580
2581 andcc %g4, LDC_MAP_W, %g0
2582 mov %g0, %g4
2583 move %xcc, TTE_W, %g4 ! if !=0 move correct value in
2584 andn %g3, %g4, %g3 ! clear w bit if no write permission
2585
2586 ! OK have a mapable RA with some permissions
2587 ! stuff the DTLB with the right info.
2588 ! FIXME: cant support write only with N1s TLB
2589
2590 ! Fail page size mis-match otherwise our demap doesnt work
2591 and %g3, TTE_SZ_MASK, %g4
2592 ldub [%g2 + LDC_MI_PG_SIZE], %g7
2593 cmp %g4, %g7
2594 bne,pn %xcc, hcall_mmu_map_addr_ra_not_found
2595 nop
2596
2597 ldx [%g2 + LDC_MI_PA], %g7
2598 or %g3, %g7, %g3 ! start building TTE
2599
2600 CLEAR_TTE_LOCK_BIT(%g3, %g7) ! %g3 tte (force clear lock bit)
2601
2602#ifndef STRICT_API
2603 set (NCTXS - 1), %g7
2604 and %o1, %g7, %o1
2605 andn %o0, %g7, %o0
2606#endif /* STRICT_API */
2607 or %o0, %o1, %g4 !! %g4 tag
2608
2609 mov MMU_TAG_ACCESS, %g7
2610 mov TLB_IN_4V_FORMAT, %g6
2611
2612 btst MAP_DTLB, %o3
2613 be %xcc, 2f
2614 btst MAP_ITLB, %o3 ! Test in delay slot to setup xcc
2615
2616 stxa %g4, [%g7]ASI_DMMU
2617 membar #Sync
2618 stxa %g3, [%g6]ASI_DTLB_DATA_IN
2619 bz,pn %xcc, 1f
2620 nop
2621
26222:
2623 stxa %g4, [%g7]ASI_IMMU
2624 membar #Sync
2625 stxa %g3, [%g6]ASI_ITLB_DATA_IN
2626
26271:
2628 stx %o0, [%g2 + LDC_MI_VA]
2629 sth %o1, [%g2 + LDC_MI_VA_CTX]
2630 ! Now the expensive bit - track the MMU usage
2631 VCPU2STRAND_STRUCT(%g1, %g1)
2632 ldub [%g1 + STRAND_ID], %g1
2633 srlx %g1, 2, %g1
2634 add %g1, MIE_VA_MMU_SHIFT, %g1
2635 mov 1, %g5
2636 sllx %g5, %g1, %g1
2637 add %g2, LDC_MI_MMU_MAP, %g2
2638 ATOMIC_OR_64(%g2, %g1, %g5, %g3)
2639
2640 HCALL_RET(EOK)
2641
2642 SET_SIZE(ldc_map_addr_api)
2643
2644
2645/*
2646 * ldc_unmap
2647 *
2648 * Unmaps the page mapped at RA from the local guest.
2649 *
2650 * FIXME: We assume for the moment the guest has done the right
2651 * demap clean up - so all we have to do here is free up the
2652 * internal structure associated with the map table entry.
2653 * This is currently a big security hole, but not a functional
2654 * gap for the moment, since the only guest we have (Solaris) is
2655 * well behaved. So we have to fix this eventually.
2656 * FIXME: Check that unused raddr bits are zero in case of aliasing
2657 *
2658 * arg0 raddr (%o0)
2659 * --
2660 * ret0 status (%o0)
2661 *
2662 */
	ENTRY_NP(hcall_ldc_unmap)

	GUEST_STRUCT(%g1)

	! Range-check the RA against this guest's mapin region:
	! below the base -> no such raddr.
	set	GUEST_LDC_MAPIN_BASERA, %g4
	ldx	[%g1 + %g4], %g4
	subcc	%o0, %g4, %g5			! %g5 = offset into region
	bneg,pn	%xcc, herr_noraddr
	nop
	! at or beyond base+size -> no such raddr.
	set	GUEST_LDC_MAPIN_SIZE, %g4
	ldx	[%g1 + %g4], %g4
	subcc	%g5, %g4, %g4
	brgez,pn %g4, herr_noraddr
	nop

	! Each mapin entry covers one LARGEST_PG_SIZE_BITS-sized slot of
	! the region, so the offset's high bits are the entry index.
	srlx	%g5, LARGEST_PG_SIZE_BITS, %g2	! mapin idx
	mulx	%g2, LDC_MAPIN_SIZE, %g5
	set	GUEST_LDC_MAPIN, %g6
	add	%g5, %g6, %g5
	add	%g1, %g5, %g5			! addr of mapin entry
	ldub	[%g5 + LDC_MI_PERMS], %g6	! zero perms == entry not live
	brz,pn	%g6, herr_nomap
	nop

	! Release the entry: mapin_free takes %g1 = guest struct,
	! %g2 = entry index (both already set up above).
	HVCALL(mapin_free)

	HCALL_RET(EOK)
	SET_SIZE(hcall_ldc_unmap)
2691
2692
2693/*
2694 * ldc_revoke
2695 *
2696 * FIXME: Currently not used, so fault in when we have a guest that
2697 * requires it.
2698 *
2699 * arg0 channel (%o0)
2700 * arg1 cookie (%o1)
2701 * arg2 revoke_cookie (%o2)
2702 * --
2703 * ret0 status (%o0)
2704 *
2705 */
	ENTRY_NP(hcall_ldc_revoke)
	! Deliberately unimplemented stub; see the FIXME in the header
	! comment above.  Always reports ENOTSUPPORTED to the guest.
	HCALL_RET(ENOTSUPPORTED)
	SET_SIZE(hcall_ldc_revoke)
2709
2710
2711
2712/*
2713 * ldc_vintr_getcookie
2714 *
2715 * arg0 devhandle (%o0)
2716 * arg1 devino (%o1)
2717 * --
2718 * ret0 status (%o0)
2719 * ret1 cookie (%o1)
2720 */
2721 ENTRY_NP(ldc_vintr_getcookie)
2722
2723 cmp %o1, MAX_LDC_INOS
2724 bgeu,pn %xcc, get_cookie_fail
2725 nop
2726
2727 GUEST_STRUCT(%g1)
2728 set GUEST_LDC_I2E, %g2
2729 add %g1, %g2, %g1
2730 mulx %o1, LDC_I2E_SIZE, %g2
2731 add %g1, %g2, %g1
2732 ldx [%g1 + LDC_I2E_MAPREG], %g1
2733 brz,pn %g1, get_target_fail
2734 nop
2735
2736 ! load the cookie from the target endpoint structure mapreg
2737 ldx [%g1 + LDC_MAPREG_COOKIE], %o1
2738 HCALL_RET(EOK)
2739
2740get_cookie_fail:
2741 HCALL_RET(EINVAL)
2742 SET_SIZE(ldc_vintr_getcookie)
2743
2744/*
2745 * ldc_vintr_setcookie
2746 *
2747 * arg0 devhandle (%o0)
2748 * arg1 devino (%o1)
2749 * arg2 cookie (%o2)
2750 * --
2751 * ret0 status (%o0)
2752 */
	ENTRY_NP(ldc_vintr_setcookie)

	! Reject out-of-range inos (unsigned compare).
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, set_cookie_fail
	nop

	! Index the guest's ino-to-endpoint table and fetch the mapreg
	! pointer; NULL means no endpoint is bound to this ino.
	GUEST_STRUCT(%g1)
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g1
	brz,pn	%g1, set_cookie_fail
	nop

	! store the cookie to the target endpoint structure mapreg
	stx	%o2, [%g1 + LDC_MAPREG_COOKIE]
	HCALL_RET(EOK)

set_cookie_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_setcookie)
2775
2776/*
2777 * ldc_vintr_getvalid
2778 *
2779 * arg0 devhandle (%o0)
2780 * arg1 devino (%o1)
2781 * --
2782 * ret0 status (%o0)
2783 * ret1 intr valid state (%o1)
2784 */
	ENTRY_NP(ldc_vintr_getvalid)

	! Reject out-of-range inos (unsigned compare).
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, get_valid_fail
	nop

	GUEST_STRUCT(%g1)

	! Index the guest's ino-to-endpoint table and fetch the mapreg
	! pointer; NULL means no endpoint is bound to this ino.
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g1
	brz,pn	%g1, get_valid_fail
	nop

	!! %g1 mapreg
	! Return the interrupt-valid flag to the guest.
	ldub	[%g1 + LDC_MAPREG_VALID], %o1
	HCALL_RET(EOK)

get_valid_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_getvalid)
2808
2809/*
2810 * ldc_vintr_setvalid
2811 *
2812 * arg0 devhandle (%o0)
2813 * arg1 devino (%o1)
2814 * arg2 intr valid state (%o2) 1: Valid 0: Invalid
2815 * --
2816 * ret0 status (%o0)
2817 */
	ENTRY_NP(ldc_vintr_setvalid)

	! Reject out-of-range inos (unsigned compare).
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, set_valid_fail
	nop

	GUEST_STRUCT(%g1)

	! Index the guest's ino-to-endpoint table; fetch both the mapreg
	! (NULL -> fail) and the owning endpoint (loaded in the brz delay
	! slot, harmless even when the branch is taken).
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g2
	brz,pn	%g2, set_valid_fail
	ldx	[%g1 + LDC_I2E_ENDPOINT], %g1	! (delay slot)

	!! %g1 endpoint
	!! %g2 mapreg

	! for valid RX interrupts only, if state is IDLE check if we need
	! to send interrupt
	brz,pn	%o2, 1f				! interrupt VALID?
	stb	%o2, [%g2 + LDC_MAPREG_VALID]	! (delay slot) store status regardless
	add	%g1, LDC_RX_MAPREG, %g3
	cmp	%g2, %g3			! RX or TX
	bne,pn	%xcc, 1f

	ld	[%g2 + LDC_MAPREG_STATE], %g3	! (delay slot of bne above)
	cmp	%g3, INTR_IDLE			! only bother if interrupt IDLE
	bne,pn	%xcc, 1f
	nop

	! check if there are pending pkts, if any, notify guest
	HVCALL(hv_ldc_chk_pkts)
1:
	HCALL_RET(EOK)

set_valid_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_setvalid)
2858
2859/*
2860 * ldc_vintr_gettarget
2861 *
2862 * arg0 devhandle (%o0)
2863 * arg1 devino (%o1)
2864 * --
2865 * ret0 status (%o0)
2866 * ret1 cpuid (%o1)
2867 */
	ENTRY_NP(ldc_vintr_gettarget)

	! Reject out-of-range inos (unsigned compare).
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, get_target_fail
	nop

	GUEST_STRUCT(%g1)

	! Index the guest's ino-to-endpoint table and fetch the mapreg
	! pointer; NULL means no endpoint is bound to this ino.
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g1
	brz,pn	%g1, get_target_fail
	nop

	! load the cpup from the target endpoint structure mapreg
	! and grab vcpuid
	ldx	[%g1 + LDC_MAPREG_CPUP], %g1
	ldub	[%g1 + CPU_VID], %o1

	HCALL_RET(EOK)

get_target_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_gettarget)
2894
2895/*
2896 * ldc_vintr_settarget
2897 *
2898 * arg0 devhandle (%o0)
2899 * arg1 devino (%o1)
2900 * arg2 cpuid (%o2)
2901 * --
2902 * ret0 status (%o0)
2903 */
	ENTRY_NP(ldc_vintr_settarget)

	! Reject out-of-range inos (unsigned compare).
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, set_target_fail
	nop

	GUEST_STRUCT(%g1)

	! convert vcpuid to pcpup (fails to herr_nocpu on a bad vcpuid)
	VCPUID2CPUP(%g1, %o2, %o2, herr_nocpu, %g2)

	! Index the guest's ino-to-endpoint table and fetch the mapreg
	! pointer; NULL means no endpoint is bound to this ino.
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g1
	brz,pn	%g1, set_target_fail
	nop

	! store cpup to target endpoint structure mapreg
	stx	%o2, [%g1 + LDC_MAPREG_CPUP]
	HCALL_RET(EOK)

set_target_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_settarget)
2930
2931/*
2932 * ldc_vintr_getstate
2933 *
2934 * arg0 devhandle (%o0)
2935 * arg1 devino (%o1)
2936 * --
2937 * ret0 status (%o0)
2938 * ret1 (%o1) 0: idle 1: received 2: delivered
2939 */
	ENTRY_NP(ldc_vintr_getstate)

	! Reject out-of-range inos (unsigned compare).
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, get_state_fail
	nop

	GUEST_STRUCT(%g1)

	! Index the guest's ino-to-endpoint table and fetch the mapreg
	! pointer; NULL means no endpoint is bound to this ino.
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g1
	brz,pn	%g1, get_state_fail
	nop

	!! %g1 mapreg
	! Return the interrupt state (idle/received/delivered).
	ld	[%g1 + LDC_MAPREG_STATE], %o1
	HCALL_RET(EOK)

get_state_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_getstate)
2963
2964/*
2965 * ldc_vintr_setstate
2966 *
2967 * arg0 devhandle (%o0)
2968 * arg1 devino (%o1)
2969 * arg2 (%o2) 0: idle 1: received 2: delivered
2970 * --
2971 * ret0 status (%o0)
2972 */
	ENTRY_NP(ldc_vintr_setstate)

	! Reject out-of-range inos (unsigned compare) and states outside
	! [0, INTR_DELIVERED].
	cmp	%o1, MAX_LDC_INOS
	bgeu,pn	%xcc, set_state_fail
	nop
	brlz,pn	%o2, set_state_fail
	cmp	%o2, INTR_DELIVERED		! (delay slot)
	bgu,pn	%xcc, set_state_fail
	nop

	GUEST_STRUCT(%g1)

	! Index the guest's ino-to-endpoint table; fetch both the mapreg
	! (NULL -> fail) and the owning endpoint (loaded in the brz delay
	! slot, harmless even when the branch is taken).
	set	GUEST_LDC_I2E, %g2
	add	%g1, %g2, %g1
	mulx	%o1, LDC_I2E_SIZE, %g2
	add	%g1, %g2, %g1
	ldx	[%g1 + LDC_I2E_MAPREG], %g2
	brz,pn	%g2, set_state_fail
	ldx	[%g1 + LDC_I2E_ENDPOINT], %g1	! (delay slot)

	!! %g1 endpoint
	!! %g2 mapreg

	! for valid RX interrupts only, if state is IDLE check if we need
	! to send interrupt
	cmp	%o2, INTR_IDLE			! interrrupt IDLE?
	bne,pn	%xcc, 1f
	st	%o2, [%g2 + LDC_MAPREG_STATE]	! (delay slot) store state regardless

	add	%g1, LDC_RX_MAPREG, %g3
	cmp	%g2, %g3			! RX or TX?
	bne,pn	%xcc, 1f
	ldub	[%g2 + LDC_MAPREG_VALID], %g3	! (delay slot) only bother if interrupt
	brz,pn	%g3, 1f				! VALID
	nop

	! check if there are pending pkts, if any, notify guest
	HVCALL(hv_ldc_chk_pkts)
1:
	HCALL_RET(EOK)

set_state_fail:
	HCALL_RET(EINVAL)
	SET_SIZE(ldc_vintr_setstate)
3017
3018/*
3019 * Wrapper around hv_ldc_send_pkt, so it can be called from C
 * SPARC ABI requires only that g2,g3,g4 are preserved across
3021 * function calls.
3022 * %o0 in-HV channel idx
3023 * %o1 payload paddr
3024 *
3025 * void c_hvldc_send(int hv_endpt, void *payload)
3026 */
	ENTRY(c_hvldc_send)

	! Save the %g regs the hypervisor C ABI expects preserved; the
	! callee clobbers globals freely.
	STRAND_PUSH(%g2, %g6, %g7)
	STRAND_PUSH(%g3, %g6, %g7)
	STRAND_PUSH(%g4, %g6, %g7)

	! Marshal C args into hv_ldc_send_pkt's register convention:
	! %g1 = channel idx, %g2 = payload paddr.
	mov	%o0, %g1
	mov	%o1, %g2
	HVCALL(hv_ldc_send_pkt)

	STRAND_POP(%g4, %g6)
	STRAND_POP(%g3, %g6)
	STRAND_POP(%g2, %g6)

	retl
	nop
	SET_SIZE(c_hvldc_send)
3044
3045/*
3046 * Internal function to send a 64-byte LDC pkt to a guest.
3047 *
3048 * hv_ldc_send_pkt(channel, paddr)
3049 *
3050 * In:
3051 * %g1 in-HV channel idx
3052 * %g2 payload paddr
3053 * Out:
3054 * %g1 0 if success, else error value
3055 * Misc:
3056 * clobbers everything
3057 * (g7 is return address from caller)
3058 */
	ENTRY_NP(hv_ldc_send_pkt)

	! %g4 = &hv_ldc_endpoints[%g1]
	ROOT_STRUCT(%g4)

	ldx	[%g4 + CONFIG_HV_LDCS], %g4
	mulx	%g1, LDC_ENDPOINT_SIZE, %g5
	add	%g4, %g5, %g4

	! Channel must be live (annulled: ECHANNEL only loaded on failure).
	ldub	[%g4 + LDC_IS_LIVE], %g5
	brz,a,pn %g5, .ldc_send_ret
	mov	ECHANNEL, %g1			! (delay slot, only if taken)

	/*
	 * Packet going to guest
	 *
	 * %g2	payload PA
	 * %g4	hv channel ptr
	 *
	 * find target guest and LDC target idx
	 */
	ldx	[%g4 + LDC_TARGET_GUEST], %g3
	ldx	[%g4 + LDC_TARGET_CHANNEL], %g4
	setx	GUEST_LDC_ENDPOINT, %g5, %g1
	mulx	%g4, LDC_ENDPOINT_SIZE, %g5
	add	%g3, %g5, %g5
	add	%g1, %g5, %g5			! %g5 = target guest endpoint ptr

	/* no Q configured ? */
	ldx	[%g5 + LDC_RX_QSIZE], %g1
	brz,a,pn %g1, .ldc_send_ret
	mov	EIO, %g1			! (delay slot, only if taken)

	/*
	 * check if the target q is full: if advancing the tail by one
	 * element (mod qsize, a power of two) would land on the head,
	 * fail with EWOULDBLOCK so the caller can retry later.
	 */
	ldx	[%g5 + LDC_RX_QSIZE], %g1
	lduw	[%g5 + LDC_RX_QTAIL], %g6
	dec	Q_EL_SIZE, %g1			! %g1 = qsize-elsize = wrap mask
	add	%g6, Q_EL_SIZE, %g6
	and	%g6, %g1, %g1			! %g1 = next tail
	lduw	[%g5 + LDC_RX_QHEAD], %g6
	cmp	%g1, %g6
	be,a,pn	%xcc, .ldc_send_ret
	mov	EWOULDBLOCK, %g1		! (delay slot, only if taken)

	/*
	 * %g1	scratch
	 * %g2	payload PA (modified)
	 * %g3	target cpu ptr (modified)
	 * %g4	target channel idx (modified)
	 * %g5	endpoint ptr (modified)
	 * %g6	scratch (modified)
	 * ---
	 * %g1	0=success, else error value
	 */
	/*
	 * append data to the tail of an LDC RX queue and
	 * send cross call notification if necessary
	 *
	 * NOTE: prior to calling this macro, you must already have
	 * verified that there is indeed room available in the RX queue
	 * since this macro does not check for that.
	 */

	! Copy the 64-byte packet, 8 bytes at a time, to the queue tail.
	lduw	[%g5 + LDC_RX_QTAIL], %g6
	ldx	[%g5 + LDC_RX_QBASE_PA], %g1
	add	%g6, %g1, %g6			! %g6 = tail element PA
	ldx	[%g2 + 0], %g1
	stx	%g1, [%g6 + 0]
	ldx	[%g2 + 8], %g1
	stx	%g1, [%g6 + 8]
	ldx	[%g2 + 16], %g1
	stx	%g1, [%g6 + 16]
	ldx	[%g2 + 24], %g1
	stx	%g1, [%g6 + 24]
	ldx	[%g2 + 32], %g1
	stx	%g1, [%g6 + 32]
	ldx	[%g2 + 40], %g1
	stx	%g1, [%g6 + 40]
	ldx	[%g2 + 48], %g1
	stx	%g1, [%g6 + 48]
	ldx	[%g2 + 56], %g1
	stx	%g1, [%g6 + 56]

	! Publish the new tail.  %g2 keeps the OLD tail so we can tell
	! whether the queue was empty before this append.
	lduw	[%g5 + LDC_RX_QTAIL], %g2
	ldx	[%g5 + LDC_RX_QSIZE], %g1
	dec	Q_EL_SIZE, %g1
	add	%g2, Q_EL_SIZE, %g6
	and	%g6, %g1, %g1
	lduw	[%g5 + LDC_RX_QHEAD], %g6
	stw	%g1, [%g5 + LDC_RX_QTAIL]
	cmp	%g2, %g6
	bne,pn	%xcc, .ldc_send_ret_ok		! if queue was non-empty, then we
	nop					! don't need to send notification.

	! Queue went non-empty: notify the target cpu.  %g7 (our return
	! address) must survive the nested HVCALL.
	STRAND_PUSH(%g7, %g2, %g1)
	mov	%g5, %g3			! %g3 = Rx endpoint for notify
	HVCALL(hv_ldc_cpu_notify)
	STRAND_POP(%g7, %g2)

.ldc_send_ret_ok:
	mov	%g0, %g1			! success
.ldc_send_ret:
	HVRET
	SET_SIZE(hv_ldc_send_pkt)
3166
3167 /*
3168 * %g3 Rx endpoint
3169 * %g1-%g6 clobbered
3170 */
3171
	ENTRY(hv_ldc_cpu_notify)

	! No target cpu bound to the RX mapreg -> nothing to notify.
	ldx	[%g3 + LDC_RX_MAPREG + LDC_MAPREG_CPUP], %g1
	brz,pn	%g1, 2f
	nop

	ldub	[%g3 + LDC_RX_MAPREG + LDC_MAPREG_VALID], %g5
	brz,pn	%g5, 2f				/* interrupt VALID? */
	nop

	ld	[%g3 + LDC_RX_MAPREG + LDC_MAPREG_STATE], %g5
	cmp	%g5, INTR_IDLE			/* interrupt IDLE? */
	bne,pn	%xcc, 2f
	nop


	! Atomically claim the IDLE->DELIVERED transition; if another
	! strand got there first the cas returns a non-IDLE value and we
	! skip the notification (exactly one delivery per IDLE state).
	! NOTE(review): the ,a annul bit on this bne is pointless over a
	! nop delay slot - harmless, but confirm no instruction was meant
	! to sit there.
	add	%g3, LDC_RX_MAPREG + LDC_MAPREG_STATE, %g4
	set	INTR_DELIVERED, %g5
	set	INTR_IDLE, %g6
	casa	[%g4]ASI_P, %g6, %g5
	cmp	%g5, INTR_IDLE
	bne,a,pn %xcc, 2f
	nop

	! Tail-branch to the mondo sender with %g3 = interrupt cookie;
	! it returns to our caller via %g7.
	ldx	[%g3 + LDC_RX_MAPREG + LDC_MAPREG_COOKIE], %g5
	mov	%g5, %g3
	ba	send_dev_mondo
	mov	1, %g2				! (delay slot)
2:

	HVRET
	SET_SIZE(hv_ldc_cpu_notify)
3204/*
3205 * Wrapper around hv_ldc_cpu_notify so it can be called from C
 * SPARC ABI requires only that g2,g3,g4 are preserved across
3207 * function calls.
3208 *
3209 * %o0 target ldc endpt
3210 *
3211 * void c_ldc_cpu_notify(ldc_endpoint_t *t_endpt)
3212 */
	ENTRY(c_ldc_cpu_notify)

	! Save the %g regs the hypervisor C ABI expects preserved; the
	! callee clobbers globals freely.
	STRAND_PUSH(%g2, %g6, %g7)
	STRAND_PUSH(%g3, %g6, %g7)
	STRAND_PUSH(%g4, %g6, %g7)

	! Marshal C arg into hv_ldc_cpu_notify's convention:
	! %g3 = target Rx endpoint.
	mov	%o0, %g3
	HVCALL(hv_ldc_cpu_notify)

	STRAND_POP(%g4, %g6)
	STRAND_POP(%g3, %g6)
	STRAND_POP(%g2, %g6)

	retl
	nop
	SET_SIZE(c_ldc_cpu_notify)
3229
3230
3231#if CONFIG_FPGA /* { Support for LDC over the FPGA mailbox */
3232
3233/*
3234 * Wrapper around LDC_SEND_SP_INTR so it can be called from C
 * SPARC ABI requires only that %g2,%g3,%g4 are preserved across
3236 * function calls and we don't use any of these registers here.
3237 *
3238 * %o0 = target endpoint
3239 * %o1 = reason for interrupt
3240 *
 * void c_ldc_send_sp_intr(struct ldc_endpoint *target, int reason)
3242 */
	ENTRY(c_ldc_send_sp_intr)

	! Marshal C args: %g5 = target endpoint, %g6 = reason.  Only
	! %g1/%g5/%g6/%g7 are touched, none of which the hypervisor C ABI
	! requires preserved, so no save/restore is needed here.
	mov	%o0, %g5
	mov	%o1, %g6

	! %g5 target endpoint
	! %g1 = scratch
	! %g7 = scratch
	! %g6 = reason
	LDC_SEND_SP_INTR(%g5, %g1, %g7, %g6)

	retl
	nop
	SET_SIZE(c_ldc_send_sp_intr)
3257
3258
3259#endif /* } */