Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / common / src / hcall_ncs.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: hcall_ncs.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do not alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)hcall_ncs.s 1.13 07/09/12 SMI"
50
51#include <sys/asm_linkage.h>
52#include <sys/htypes.h>
53#include <hypervisor.h>
54#include <sparcv9/misc.h>
55#include <sparcv9/asi.h>
56#include <asi.h>
57#include <mmu.h>
58#include <sun4v/traps.h>
59#include <sun4v/asi.h>
60#include <sun4v/mmu.h>
61#include <sun4v/queue.h>
62#include <devices/pc16550.h>
63
64#include <debug.h>
65#include <config.h>
66#include <guest.h>
67#include <md.h>
68#include <abort.h>
69#include <offsets.h>
70#include <ncs.h>
71#include <util.h>
72#include <mau.h>
73
74/*
75 *-----------------------------------------------------------
76 * Function: setup_mau
77 * Called via setup_cpu() if the given cpu has access
78 * to a mau. If the handle is non-NULL then the mau
79 * struct has already been initialized.
80 * Arguments:
81 * Input:
82 * %i0 - CONFIG
83 * %g1 - cpu struct
84 * %g2 - ino
85 * %g7 - return address
86 * Output:
87 * %g1 - &config.maus[mau-id] or NULL (0) if error.
88 *
89 * Uses: %g1-%g6,%l3
90 *-----------------------------------------------------------
91 */
92
93	ENTRY_NP(setup_mau)
94
95	ldx	[%g1 + CPU_MAU], %g3		! %g3 = mau struct, NULL if none
96	brz,pn	%g3, 1f				! no MAU for this cpu: return NULL
97	nop
98	ldx	[%g3 + MAU_PID], %g6
99	cmp	%g6, NMAUS			! sanity-check physical mau-id
100	bgeu,a,pn %xcc, 1f
101	mov	%g0, %g3			! (annulled slot) bad id: return NULL
102
103	VCPU2STRAND_STRUCT(%g1, %g5)
104	ldub	[%g5 + STRAND_ID], %g5
105	and	%g5, NSTRANDS_PER_MAU_MASK, %g5	! %g5 = hw thread-id
106	mov	1, %g4
107	sllx	%g4, %g5, %g4			! %g4 = 1 << hw thread-id
108	ldx	[%g3 + MAU_CPUSET], %g6
109	btst	%g4, %g6			! strand already in mau's cpuset?
110	bnz,pn	%xcc, 1f			! yes: nothing more to do
111	nop
112	bset	%g4, %g6			! add this strand to the cpuset
113	stx	%g6, [%g3 + MAU_CPUSET]
114
115	add	%g5, MAU_CPU_ACTIVE, %g5	! byte offset of this strand's slot
116	mov	-1, %g6
117	stb	%g6, [%g3 + %g5]		! mark strand active for MAU use
118
119	ldx	[%g3 + MAU_CPUSET], %g5
120	cmp	%g4, %g5			! 1st (only) cpu?
121	bne,pt	%xcc, 1f			! not first: one-time init already done
122	nop
123
	/* First cpu on this mau: do one-time initialization. */
124	ldx	[%g3 + MAU_PID], %g6
125	ID2HANDLE(%g6, MAU_HANDLE_SIG, %g6)	! build guest-visible handle from mau-id
126	stx	%g6, [%g3 + MAU_HANDLE]
127	stx	%g2, [%g3 + MAU_INO]
128	/*
129	 * Now set up mau queue.
130	 */
131	MAU_CLEAR_QSTATE(%g3)
132	/*
133	 * Now set up interrupt stuff.
134	 */
135	ldx	[%g1 + CPU_GUEST], %g1
136	LABEL_ADDRESS(mau_intr_getstate, %g4)
137	mov	%g0, %g5
138	!!
139	!! %g1 = guestp
140	!! %g2 = ino
141	!! %g3 = &config.maus[mau-id]
142	!! %g4 = mau_intr_getstate
143	!! %g5 = NULL (no setstate callback)
144	!! %g7 = return pc (set up in setup_cpu())
145	!!
146	/*
147	 * Note that vdev_intr_register() clobbers %g1,%g3,%g5-%g7.
	 * NOTE(review): %g3 is dereferenced below even though the list
	 * above names it as clobbered -- verify against the actual
	 * register usage of vdev_intr_register().
148	 */
149	mov	%g7, %l3	! save return pc
150	HVCALL(vdev_intr_register)
151	stx	%g1, [%g3 + MAU_IHDLR + CI_COOKIE]	! stash cookie for intr generation
152
153	mov	MAU_STATE_RUNNING, %g2
154	stx	%g2, [%g3 + MAU_STATE]
155
156	mov	%l3, %g7	! restore return pc
1571:
158	mov	%g3, %g1	! return &maus[mau-id]
159	HVRET
160
161	SET_SIZE(setup_mau)
162
163/*
164 * Wrapper around setup_mau, so it can be called from C
165 * SPARC ABI requires only that g2,g3,g4 are preserved across
166 * function calls.
167 * %g1 - cpu struct
168 * %g2 - ino
169 * %g3 - config
170 * Output:
171 * %g1 - &config.maus[mau-id] or NULL (0) if error.
172 *
173 * maup = c_setup_mau(vcpup, ino, &config);
174 *
175 */
176
177	ENTRY(c_setup_mau)
178
	/* Preserve the globals the SPARC ABI requires across a C call. */
179	STRAND_PUSH(%g2, %g6, %g7)
180	STRAND_PUSH(%g3, %g6, %g7)
181	STRAND_PUSH(%g4, %g6, %g7)
182
183	mov	%o0, %g1	! C arg0: vcpup
184	mov	%o1, %g2	! C arg1: ino
185	mov	%o2, %g3	! C arg2: &config
186	HVCALL(setup_mau)
187	mov	%g1, %o0	! C return value: maup or NULL
188
	/* Restore in reverse order of the pushes above. */
189	STRAND_POP(%g4, %g6)
190	STRAND_POP(%g3, %g6)
191	STRAND_POP(%g2, %g6)
192
193	retl
194	nop
195	SET_SIZE(c_setup_mau)
196
197/*
198 *-----------------------------------------------------------
199 * Function: stop_crypto()
200 *
201 * This routine needs to execute ON the core
202 * containing the desired MAU to be stopped. This
203 * is accomplished by being called during stop_vcpu_cmd.
204 *
205 * We wait for the MAU to stop by doing a sync-load.
206 * If the MAU is currently busy running a job on behalf
207 * of the current strand (cpu) being stopped then the
208 * sync-load will wait for it to complete. If the MAU
209 * is busy running a job for a different strand (cpu)
210 * then the sync-load will immediately return. Since
211 * the job being executed is on behalf of a different
212 * cpu then the immediate return is okay since we only
213 * care about the local cpu being stopped.
214 *
215 * Note that we have to enable interrupts while doing
216 * this load to ensure the MAU can complete the operation
217 * including possibly handling an interrupt.
218 *
219 * Since we are stopping the current cpu we can be
220 * assured that any new MAU jobs will not be issued
221 * on this strand (cpu). Any subsequent MAU jobs will
222 * be issued from some other strand.
223 *
224 * Arguments:
225 * Input:
226 * %g1 - cpu struct
227 * %g2 - guest struct
228 * %g7 - return address
229 *-----------------------------------------------------------
230 */
231	ENTRY_NP(stop_crypto)
232
233	ldx	[%g1 + CPU_MAU], %g3	! %g3 = mau struct, NULL if none
234	brz,pn	%g3, 1f			! no MAU: nothing to stop
235	nop
236
237	VCPU2STRAND_STRUCT(%g1, %g5)
238	ldub	[%g5 + STRAND_ID], %g5
239	and	%g5, NSTRANDS_PER_CORE_MASK, %g5	! %g5 = hw thread-id
240	add	%g3, %g5, %g3		! index this strand's active-byte slot
241	ldub	[%g3 + MAU_CPU_ACTIVE], %g4
242	brz,pn	%g4, 1f			! strand not using the MAU: done
243	nop
244
	/* Sync-load: waits out any MAU job issued by this strand. */
245	CRYPTO_STOP(%g4, %g5)
246
247	stb	%g0, [%g3 + MAU_CPU_ACTIVE]	! mark strand inactive
2481:
249	HVRET
250
251	SET_SIZE(stop_crypto)
252
253
254/*
255 *-----------------------------------------------------------
256 * Function: start_crypto()
257 *
258 * All we have to do here is set the MAU_CPU_ACTIVE word.
259 *
260 * Arguments:
261 * Input:
262 * %g1 - cpu struct
263 * %g2 - guest struct
264 * %g7 - return address
265 * Uses: %g3, %g4
266 *-----------------------------------------------------------
267 */
268	ENTRY_NP(start_crypto)
269
270	ldx	[%g1 + CPU_MAU], %g3	! %g3 = mau struct, NULL if none
271	brz,pn	%g3, 1f			! no MAU: nothing to do
272	nop
273
274	VCPU2STRAND_STRUCT(%g1, %g4)
275	ldub	[%g4 + STRAND_ID], %g4
276	and	%g4, NSTRANDS_PER_CORE_MASK, %g4	! %g4 = hw thread-id
277	add	%g3, %g4, %g3		! index this strand's active-byte slot
278	mov	-1, %g4
279	stb	%g4, [%g3 + MAU_CPU_ACTIVE]	! mark strand active again
2801:
281	HVRET
282
283	SET_SIZE(start_crypto)
284
285/*
286 *-----------------------------------------------------------
287 * Function: mau_intr()
288 * Called from within trap context.
289 * Changes MQ_HEAD only.
290 * Arguments:
291 * Input:
292 * %g1 - cpu struct
293 * Output:
294 *-----------------------------------------------------------
295 */
296	ENTRY_NP(mau_intr)
297
298	ldx	[%g1 + CPU_MAU], %g2	! %g2 = mau struct, NULL if none
299	brz,pn	%g2, .mi_exit_nolock	! no MAU bound to this vcpu
300	nop
301
302	MAU_LOCK_ENTER(%g2, %g5, %g3, %g6)
303
304	ldx	[%g2 + MAU_STATE], %g3
305	cmp	%g3, MAU_STATE_RUNNING	! ignore unless MAU fully set up
306	bne,pn	%xcc, .mi_exit
307	nop
308
309	VCPU2STRAND_STRUCT(%g1, %g7)
310	ldub	[%g7 + STRAND_ID], %g7
311	and	%g7, NSTRANDS_PER_MAU_MASK, %g7	! %g7 = hw thread-id
312	add	%g2, %g7, %g4		! this strand's active-byte slot
313	ldub	[%g4 + MAU_CPU_ACTIVE], %g4
314	brz,pn	%g4, .mi_exit		! strand not using the MAU: ignore
315	nop
316
317#ifdef ERRATA_192
	/*
	 * Only act when exactly one store was in progress; once it has
	 * drained, clear the counter and (if requested) re-enable the CWQ.
	 */
318	ldx	[%g2 + MAU_STORE_IN_PROGR], %g3
319	sub	%g3, 1, %g3
320	brnz,pn	%g3, .mi_no_stpr
321	nop
322	stx	%g0, [%g2 + MAU_STORE_IN_PROGR]
323	ldx	[%g2 + MAU_ENABLE_CWQ], %g3
324	brz,pn	%g3, .mi_no_stpr
325	nop
326	mov	ASI_SPU_CWQ_CSR_ENABLE, %g4
327	mov	CWQ_CSR_ENABLED, %g3
328	stxa	%g3, [%g4]ASI_STREAM	! re-enable the cwq
329.mi_no_stpr:
330#endif
331	ldx	[%g2 + MAU_QUEUE + MQ_HEAD], %g3	! %g3 = mq_head
332	ldx	[%g2 + MAU_QUEUE + MQ_TAIL], %g4	! %g4 = mq_tail
333
334	mov	%g0, %g6	! do_intr flag
335
336.mi_chknext:
337	cmp	%g3, %g4	! queue empty?
338	be,a,pn	%xcc, .mi_chkintr
339	st	%g0, [%g2 + MAU_QUEUE + MQ_BUSY]	! (annulled: empty only) clear busy
340	ldx	[%g3 + NHD_STATE], %g4
341	/*
342	 * If the descriptor is Pending, then we
343	 * mark it Busy and start the job on the MA.
344	 * There is no interrupt to the guest since
345	 * obviously the job is not complete yet.
346	 */
347	cmp	%g4, ND_STATE_PENDING
348	bne,pt	%xcc, .mi_chkbusy
349	nop
350	mov	ND_STATE_BUSY, %g4
351	stx	%g4, [%g3 + NHD_STATE]
352	add	%g3, NHD_REGS, %g3	! %g3 -> descriptor's register image
353	/*
354	 * Load up the MAU registers and start the job.
355	 * Note that we force the Interrupt bit to be on.
356	 * We can assume given the fact that we arrived in
357	 * this code from an interrupt, so all subsequent
358	 * jobs must have it set.
359	 *
360	 * We are out of registers, so we hide our do_intr flag
361	 * in %g2 which we know is a 8-byte aligned address and
362	 * thus not using bit0.
363	 */
364	or	%g2, %g6, %g2		! stash do_intr in bit0 of mau pointer
365	!!
366	!! %g3 = ncs_hvdesc.nhd_regs
367	!! %g7 = hw-thread-id
368	!!
369
370#ifdef ERRATA_192
371	MAU_LOAD1(%g2, %g3, %g7, %g1, 1, .mi_addr_err, .mi_chkrv, %g4, %g5, %g6)
372#else
373	MAU_LOAD(%g3, %g7, %g1, 1, .mi_addr_err, .mi_chkrv, %g4, %g5, %g6)
374#endif
375
376	!!
377	!! %g1 = return value (errno)
378	!!
379	and	%g2, 1, %g6	! get hidden do_intr flag
380	ba	.mi_chkintr
381	andn	%g2, 1, %g2	! restore placeholder
382
383.mi_addr_err:
384	mov	ENORADDR, %g1	! descriptor had a bad real address
385
	/* Map errno (%g1) to the descriptor state to record. */
386.mi_chkrv:
387	brnz,a,pn %g1, .mi_set_state
388	mov	ND_STATE_ERROR, %g1	! (annulled: error path only)
389	mov	ND_STATE_DONE, %g1
390
391.mi_set_state:
392	sub	%g3, NHD_REGS, %g3	! back to start of descriptor
393	stx	%g1, [%g3 + NHD_STATE]
394
395	ldx	[%g3 + NHD_TYPE], %g4
396	and	%g4, ND_TYPE_END, %g5
397	!!
398	!! %g5 = non-zero == END
399	!!
400	movrnz	%g5, 1, %g6	! END descriptor completed: flag guest intr
401
402	add	%g3, NCS_HVDESC_SIZE, %g3	! mq_head++
403	ldx	[%g2 + MAU_QUEUE + MQ_END], %g4
404	cmp	%g3, %g4	! mq_head == mq_end?
405	bgeu,a,pn %xcc, .mi_qwrap
406	ldx	[%g2 + MAU_QUEUE + MQ_BASE], %g3	! mq_head = mq_base
407.mi_qwrap:
408	stx	%g3, [%g2 + MAU_QUEUE + MQ_HEAD]
409	ldx	[%g2 + MAU_QUEUE + MQ_TAIL], %g4
410	cmp	%g3, %g4
411	move	%xcc, 1, %g6	! queue drained: flag guest intr
412	/*
413	 * If previous descriptor was not in error or was the
414	 * last one in a job, then check the next descriptor
415	 * for normal processing.
416	 */
417	brnz,pn	%g5, .mi_chknext
418	cmp	%g1, ND_STATE_ERROR	! (delay slot: executes either way)
419	bne,pt	%xcc, .mi_chknext
420	nop
421	/*
422	 * If we reach here then we encountered an
423	 * error on a descriptor within the middle
424	 * of a job. Need to pop the entire job
425	 * off the queue. We stop popping descriptors
426	 * off until we either hit the Last one or
427	 * hit the Tail of the queue.
428	 * Note that we set state in all remaining
429	 * descriptors in job to Error (ND_STATE_ERROR).
430	 */
431	!!
432	!! %g1 = ND_STATE_ERROR
433	!!
434	cmp	%g3, %g4	! queue empty?
435	be,a,pn	%xcc, .mi_genintr
436	st	%g0, [%g2 + MAU_QUEUE + MQ_BUSY]	! (annulled: empty only)
437	ba	.mi_set_state	! mark next descriptor Error too
438	add	%g3, NHD_REGS, %g3
439
440.mi_chkbusy:
441	/*
442	 * If the descriptor is Busy, then we have
443	 * been interrupted for the completion of
444	 * this particular descriptor. If it is
445	 * the End (last) descriptor in the job or
446	 * the last descriptor in our queue, then we'll
447	 * generate an interrupt to the guest.
448	 */
449	cmp	%g4, ND_STATE_BUSY
450	bne,pn	%xcc, .mi_chkintr
451	nop
452
453	MAU_CHECK_ERR(%g1, %g4, %g5)	! %g1 = errno from MAU error bits
454	stx	%g1, [%g3 + NHD_ERRSTATUS]
455
456	ba	.mi_chkrv
457	add	%g3, NHD_REGS, %g3
458
459.mi_chkintr:
460	brz,pt	%g6, .mi_exit	! no do_intr flag: leave quietly
461	nop
462
463.mi_genintr:
464	/*
465	 * This is the time we would store something
466	 * into maus[].MAU_INTR.CI_DATA if we wanted,
467	 * however it is currently unused.
468	 */
469	ldx	[%g2 + MAU_IHDLR + CI_COOKIE], %g1
470	brz,pn	%g1, .mi_exit	! no cookie registered: cannot notify
471	nop
472
	/* Drop the lock before calling out to generate the interrupt. */
473	MAU_LOCK_EXIT(%g2, %g5)
474
475	HVCALL(vdev_intr_generate)
476
477.mi_exit_nolock:
478	retry
479
480.mi_exit:
481
482	MAU_LOCK_EXIT(%g2, %g5)
483
484	retry
485
486	SET_SIZE(mau_intr)
487
488/*
489 *-----------------------------------------------------------
490 * Function: mau_intr_getstate()
491 * Arguments:
492 * Input:
493 * %g1 - maus[] struct
494 * %g7 - return pc
495 * Output:
496 *-----------------------------------------------------------
497 */
498	ENTRY_NP(mau_intr_getstate)
499
500	mov	%g0, %g2	! default state = 0
501	ldx	[%g1 + MAU_QUEUE + MQ_HEAD], %g3
502	ldx	[%g1 + MAU_QUEUE + MQ_HEAD_MARKER], %g4
503	cmp	%g3, %g4	! head moved past recorded marker?
504	movne	%xcc, 1, %g2	! yes: state = 1
505	jmp	%g7 + SZ_INSTR
506	mov	%g2, %g1	! (delay slot) return state in %g1
507
508	SET_SIZE(mau_intr_getstate)
509
510/*
511 *-----------------------------------------------------------
512 * Function: hcall_ncs_request(int cmd, uint64_t arg, size_t sz)
513 * Arguments:
514 * Input:
515 * %o5 - hcall function number
516 * %o0 - NCS sub-function
517 * %o1 - Real address of 'arg' data structure
518 * %o2 - Size of data structure at 'arg'.
519 * Output:
520 * %o0 - EOK (on success),
521 * EINVAL, ENORADDR, EBADALIGN, EWOULDBLOCK (on failure)
522 *-----------------------------------------------------------
523 */
524	ENTRY_NP(hcall_ncs_request)
525
526	btst	NCS_PTR_ALIGN - 1, %o1	! arg RA must be pointer-aligned
527	bnz,pn	%xcc, herr_badalign
528	nop
529	/*
530	 * convert %o1 to physaddr for calls below,
531	 */
532	GUEST_STRUCT(%g2)
533	RA2PA_RANGE_CONV_UNK_SIZE(%g2, %o1, %o2, herr_noraddr, %g3, %g4)
534
	/* Dispatch on the NCS sub-function code in %o0. */
535	cmp	%o0, NCS_V10_QTAIL_UPDATE
536	be	%xcc, ncs_v10_qtail_update
537	nop
538
539	cmp	%o0, NCS_V10_QCONF
540	be	%xcc, ncs_v10_qconf
541	nop
542
543	HCALL_RET(EINVAL)	! unknown sub-function
544
545	SET_SIZE(hcall_ncs_request)
546
547/*
548 *-----------------------------------------------------------
549 * Function: ncs_v10_qtail_update(int unused, ncs_qtail_update_arg_t *arg, size_t sz)
550 * Arguments:
551 * Input:
552 * %o5 - hcall function number
553 * %o0 - NCS sub-function
554 * %o1 - ncs_qtail_update_arg_t *
555 * %o2 - sizeof (ncs_qtail_update_arg_t)
556 * Output:
557 * %o0 - EOK (on success),
558 * EINVAL, ENORADDR, EWOULDBLOCK, EIO (on failure)
559 *-----------------------------------------------------------
560 */
561	ENTRY_NP(ncs_v10_qtail_update)
562
563	cmp	%o2, NCS_QTAIL_UPDATE_ARG_SIZE	! arg struct must be exact size
564	bne,pn	%xcc, herr_inval
565	nop
566
567	VCPU_GUEST_STRUCT(%g7, %g4)	! %g7 = vcpu, %g4 = guest
568
569	/*
570	 * Ignore the MID that the guest passes. We use vMID's now
571	 * so whatever it passes is likely wrong, just calculate the MID
572	 * from the strand ID.
573	 */
574	VCPU2STRAND_STRUCT(%g7, %g2)
575	ldub	[%g2 + STRAND_ID], %g2
576	srlx	%g2, STRANDID_2_COREID_SHIFT, %g2	! strand-id -> core/mau id
577
578	cmp	%g2, NMAUS
579	bgeu,pn	%xcc, herr_inval
580	nop
581
582	GUEST_MID_GETMAU(%g4, %g2, %o2)	! %o2 = mau struct for this mid
583	brz,pn	%o2, herr_inval		! guest has no access to that MAU
584	nop
585
586	add	%o2, MAU_QUEUE, %g1
587	!!
588	!! %g1 = maus[mid].mau_queue
589	!!
590	/*
591	 * Make sure the tail index the caller
592	 * gave us is a valid one for our queue,
593	 * i.e. ASSERT(mq_nentries > nu_tail).
594	 */
595	ldx	[%g1 + MQ_NENTRIES], %g3
596	/*
597	 * Error if queue not configured,
598	 * i.e. MQ_NENTRIES == 0
599	 */
600	brz,pn	%g3, herr_inval
601	nop
602	ldx	[%o1 + NU_TAIL], %g2
603	!!
604	!! %g3 = mau.mau_queue.mq_nentries
605	!! %g2 = ncs_qtail_update_arg.nu_tail
606	!!
607	cmp	%g3, %g2
608	bleu,pn	%xcc, herr_inval	! tail index out of range
609	nop
610
611	ldx	[%o1 + NU_SYNCFLAG], %g6
612	movrnz	%g6, 1, %g6		! normalize syncflag to 0/1
613
614	mov	%g4, %o1	! %o1 = guest struct
615	/*
616	 * Turn tail index passed in by caller into
617	 * actual pointer into queue.
618	 */
619	sllx	%g2, NCS_HVDESC_SHIFT, %g3	! index -> byte offset
620	ldx	[%g1 + MQ_BASE], %g4
621	add	%g3, %g4, %g3
622	!!
623	!! %g3 = &mau_queue.mq_base[nu_tail] (new mq_tail)
624	!!
625	stx	%g3, [%g1 + MQ_TAIL]
626
627.v1_qtail_dowork:
628	sub	%g0, 1, %g2		! -1
629	st	%g2, [%g1 + MQ_BUSY]	! mark queue busy while we process
630
631	ldx	[%g1 + MQ_HEAD], %g2
632	ldx	[%g1 + MQ_END], %g5
633	!!
634	!! %g2 = mq_head
635	!! %g3 = mq_tail
636	!! %g5 = mq_end
637	!!
638	/*
639	 * Need hw-thread-id for MA_CTL register.
640	 * Start at mq_head and keep looking for work
641	 * until we run into mq_tail.
642	 */
643	VCPU2STRAND_STRUCT(%g7, %g7)
644	ldub	[%g7 + STRAND_ID], %g7	! %g7 = physical cpuid
645	and	%g7, NSTRANDS_PER_MAU_MASK, %g7	! phys cpuid -> hw threadid
646	!!
647	!! %o1 = guest struct
648	!! %g7 = hw-thread-id
649	!!
650
651.v1_qtail_loop:
652	cmp	%g2, %g3	! mq_head == mq_tail?
653	be,a,pn	%xcc, .v1_qtail_done
654	stx	%g2, [%g1 + MQ_HEAD]	! (annulled: done path only)
655	/*
656	 * Mark current descriptor busy.
657	 */
658	mov	ND_STATE_BUSY, %o0
659	stx	%o0, [%g2 + NHD_STATE]	! nhd_state = BUSY
660	add	%g2, NHD_REGS, %g2	! %g2 -> descriptor's register image
661	!!
662	!! %g2 = ncs_hvdesc.nhd_regs
663	!! %g7 = hw-thread-id
664	!!
665	MAU_LOAD(%g2, %g7, %o0, %g6, .v1_qtail_addr_err, .v1_qtail_chk_rv, %o1, %o2, %g4)
666
667	/*
668	 * If this was an asynchronous descriptor then
669	 * we're done! Leave MQ_BUSY set.
670	 */
671	brnz,pt	%g6, .v1_qtail_done_async
672	nop
673
674	/*
675	 * In Niagara2 the Load value from the Sync
676	 * register simply indicates whether the MAU
677	 * was busy (1 = yes, 0 = no) at the time we
678	 * issued the Load. It does not indicate a
679	 * success or failure of the MAU operation.
680	 * So, we effectively ignore the Load value and
681	 * check for errors in the HWE/INVOP bits in
682	 * the Control register.
683	 */
684	mov	ASI_MAU_SYNC, %g4
685	ldxa	[%g4]ASI_STREAM, %g0	! sync-load; result discarded
686	/*
687	 * Check error bits in Control register.
688	 */
689	MAU_CHECK_ERR(%o0, %o1, %g4)
690
691.v1_qtail_chk_rv:
692	/*
693	 * Determine appropriate state to set
694	 * descriptor to.
695	 */
696	brnz,a,pn %o0, .v1_qtail_set_state
697	mov	ND_STATE_ERROR, %o2	! (annulled: error path only)
698	mov	ND_STATE_DONE, %o2
699.v1_qtail_set_state:
700	sub	%g2, NHD_REGS, %g2
701	!!
702	!! %g2 = &ncs_hvdesc
703	!!
704	stx	%o2, [%g2 + NHD_STATE]
705	brnz,a,pn %o0, .v1_qtail_err
706	stx	%g2, [%g1 + MQ_HEAD]	! (annulled: record head on error)
707
708	ldx	[%g1 + MQ_BASE], %g4
709	add	%g2, NCS_HVDESC_SIZE, %g2	! mq_head++
710	cmp	%g2, %g5	! mq_head == mq_end?
711	ba,pt	%xcc, .v1_qtail_loop
712	movgeu	%xcc, %g4, %g2	! mq_head = mq_base
713
714.v1_qtail_done:
715	ba	hret_ok
716	st	%g0, [%g1 + MQ_BUSY]	! clear busy before returning EOK
717
718.v1_qtail_done_async:
719	ba	hret_ok		! async: MQ_BUSY intentionally left set
720	nop
721
722.v1_qtail_addr_err:
723	ba	.v1_qtail_chk_rv
724	mov	ENORADDR, %o0	! bad real address in descriptor
725
726.v1_qtail_err:
727	!!
728	!! %o0 = EWOULDBLOCK, EINVAL, ENORADDR, EIO
729	!!
730	st	%g0, [%g1 + MQ_BUSY]
731
732	HCALL_RET(%o0)
733
734	SET_SIZE(ncs_v10_qtail_update)
735
736/*
737 *-----------------------------------------------------------
738 * Function: ncs_v10_qconf(int unused, ncs_qconf_arg_t *arg, size_t sz)
739 * Arguments:
740 * Input:
741 * %o5 - hcall function number
742 * %o0 - NCS sub-function
743 * %o1 - ncs_qconf_arg_t *
744 * %o2 - sizeof (ncs_qconf_arg_t)
745 * Output:
746 * %o0 - EOK (on success),
747 * EBADALIGN, ENORADDR, EINVAL (on failure)
748 *-----------------------------------------------------------
749 */
750	ENTRY_NP(ncs_v10_qconf)
751
752	cmp	%o2, NCS_QCONF_ARG_SIZE	! arg struct must be exact size
753	bne,pn	%xcc, herr_inval
754	nop
755
756	ldx	[%o1 + NQ_MID], %g2	! %g2 = mid
757	cmp	%g2, NMAUS
758	bgeu,pn	%xcc, herr_inval
759	nop
760
761	GUEST_STRUCT(%g1)
762	/*
763	 * Recall that the driver code simply increments
764	 * through all the possible vMIDs when doing a qconf,
765	 * regardless of whether they are actually present
766	 * or not. As a result, it is possible for
767	 * the following macro to return null if the guest
768	 * does not have access to that MAU. This is not a
769	 * critical error since the driver code will never
770	 * attempt to use a non-present mau, however the
771	 * driver code cannot currently handle a "no mau"
772	 * error return from this HV call and since the driver
773	 * code is at present off-limit for repair, we have
774	 * to fake success.
775	 */
776	/*
777	 * Guests calculate the MAUID based on cpu id, which are
778	 * virtual ids. But firmware uses physical MIDs. So we need
779	 * to translate the guest's vMID to a physical MID.
780	 * Loop through the ROOT MID array and add 1 to the vMID for
781	 * each unconfigured MAU we find.
782	 */
783
	! %g4 = physical mid candidate, %g2 = (adjusted) target vMID
784	mov	0, %g4
7850:
786	cmp	%g4, NMAUS
787	bgeu,pn	%xcc, hret_ok	! ran off the end: fake success (see above)
788	nop
789	GUEST_MID_GETMAU(%g1, %g4, %g3)	! %g3 = mau struct or NULL
790	brz,pn	%g3, 1f		! unconfigured MAU: bump vMID target
791	cmp	%g2, %g4	! (delay slot) candidate == target?
792	be	%xcc, 2f	! match found
793	nop
794	ba	0b
7951:	inc	%g4		! (also delay slot of the ba above)
796	ba	0b
797	inc	%g2		! (delay slot) skip over unconfigured MAU
7982:
799
800	add	%g3, MAU_QUEUE, %g1	! %g1 = &maus[mid].mau_queue
801
802	ldx	[%o1 + NQ_BASE], %g2
803	brnz,a,pt %g2, .v1_qconf_config	! non-NULL base => configure
804	ldx	[%o1 + NQ_END], %g3	! (annulled: config path only)
805	/*
806	 * Caller wishes to unconfigure the mau_queue entry
807	 * for the given MAU.
808	 */
809	ld	[%g1 + MQ_BUSY], %g4
810	brnz,pn	%g4, herr_wouldblock	! queue has work in flight
811	nop
812	stx	%g0, [%g1 + MQ_BASE]
813	stx	%g0, [%g1 + MQ_END]
814	stx	%g0, [%g1 + MQ_HEAD]
815	stx	%g0, [%g1 + MQ_TAIL]
816	stx	%g0, [%g1 + MQ_NENTRIES]
817
818	HCALL_RET(EOK)
819
820.v1_qconf_config:
821	/*
822	 * %g2 = nq_base
823	 * %g3 = nq_end
824	 */
825	or	%g2, %g3, %g5
826	btst	NCS_PTR_ALIGN - 1, %g5	! both ends must be aligned
827	bnz,pn	%xcc, herr_badalign
828	nop
829
830	sub	%g3, %g2, %g5	! %g5 = queue size (end-base)
831	/*
832	 * %g2 (RA(nq_base) -> PA(nq_base))
833	 */
834	GUEST_STRUCT(%g4)
835	RA2PA_RANGE_CONV_UNK_SIZE(%g4, %g2, %g5, herr_noraddr, %g6, %g7)
836	mov	%g7, %g2
837	/*
838	 * %g3 (RA(nq_end) -> PA(nq_end))
839	 */
840	RA2PA_RANGE_CONV(%g4, %g3, 8, herr_noraddr, %g6, %g7)
841	mov	%g7, %g3
842
843	/*
844	 * Verify that the queue size is what
845	 * we would expect, i.e. (nq_nentries << NCS_HVDESC_SHIFT)
846	 */
847	ldx	[%o1 + NQ_NENTRIES], %g6
848	sllx	%g6, NCS_HVDESC_SHIFT, %g7
849	cmp	%g5, %g7
850	bne,pn	%xcc, herr_inval
851	nop
852
853	stx	%g2, [%g1 + MQ_BASE]
854	/*
855	 * Head and Tail initially point to Base.
856	 */
857	stx	%g2, [%g1 + MQ_HEAD]
858	stx	%g2, [%g1 + MQ_TAIL]
859
860	stx	%g3, [%g1 + MQ_END]
861	stx	%g6, [%g1 + MQ_NENTRIES]
862
863	HCALL_RET(EOK)
864
865	SET_SIZE(ncs_v10_qconf)
866
867/*
868 *-----------------------------------------------------------
869 * Function: ncs_qconf(uint64_t qtype, uint64_t baseaddr, uint64_t nentries)
870 * Arguments:
871 * Input:
872 * %o0 - queue type
873 * %o1 - base real address of queue or queue handle if
874 * unconfiguring a queue.
875 * %o2 - number of entries in queue
876 * Output:
877 * %o0 - EOK (on success),
878 * EINVAL, ENOACCESS, EBADALIGN,
879 * ENORADDR (on failure)
880 * %o1 - queue handle for respective queue.
881 *-----------------------------------------------------------
882 */
883	ENTRY_NP(hcall_ncs_qconf)
884
885	VCPU_GUEST_STRUCT(%g2, %g1)	! %g2 = vcpu, %g1 = guest
886
	/* Dispatch on queue type: MAU handled here, CWQ elsewhere. */
887	cmp	%o0, NCS_QTYPE_MAU
888	be	ncs_qconf_mau
889	nop
890
891	IS_NCS_QTYPE_CWQ(%o0, NCS_QTYPE_CWQ, ncs_qconf_cwq)
892
893	HCALL_RET(EINVAL)	! unknown queue type
894
895	SET_SIZE(hcall_ncs_qconf)
896
897/*
898 *-----------------------------------------------------------
899 * Function: ncs_qconf_mau
900 * Arguments:
901 * Input:
902 * %o1 - base real address of queue or queue handle if
903 * unconfiguring a queue.
904 * %o2 - number of entries in queue.
905 * %g1 - guest struct
906 * %g2 - cpu struct
907 * Output:
908 * %o0 - EOK (on success),
909 * EINVAL, ENOACCESS, EBADALIGN,
910 * ENORADDR, EWOULDBLOCK (on failure)
911 * %o1 - queue handle for respective queue.
912 *-----------------------------------------------------------
913 */
914	ENTRY_NP(ncs_qconf_mau)
915
916	VCPU_GUEST_STRUCT(%g2, %g1)	! %g2 = vcpu, %g1 = guest
917	brz,pn	%o2, .m_qconf_unconfig	! nentries == 0 => unconfigure
918	nop
919
920	cmp	%o2, NCS_MIN_MAU_NENTRIES
921	blu,pn	%xcc, herr_inval	! queue too small
922	nop
923	/*
924	 * Check that #entries is a power of two.
925	 */
926	sub	%o2, 1, %g3
927	andcc	%o2, %g3, %g0	! (n & (n-1)) != 0 => not a power of 2
928	bnz,pn	%xcc, herr_inval
929	nop
930
931	ldx	[%g2 + CPU_MAU], %g3	! %g3 = this cpu's mau struct
932	brz,pn	%g3, herr_noaccess	! cpu has no MAU
933	nop
934	/*
935	 * The cpu that does the queue configure will also
936	 * be the one targeted for all the interrupts for
937	 * this mau. We need to effectively single thread
938	 * the interrupts per-mau because the interrupt handler
939	 * updates global per-mau data structures.
940	 */
941	VCPU2STRAND_STRUCT(%g2, %g7)
942	ldub	[%g7 + STRAND_ID], %o0	! %o0 = configuring strand's id
943	/*
944	 * Make sure base address is size aligned.
945	 */
946	sllx	%o2, NCS_HVDESC_SHIFT, %g4	! %g4 = queue size in bytes
947	sub	%g4, 1, %g2
948	btst	%g2, %o1
949	bnz,pn	%xcc, herr_badalign
950	nop
951
952	MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)
953	/*
954	 * Translate base address from real to physical.
955	 */
956	RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, .m_qconf_noraddr, %g6, %g2)
957
	/* Initialize the mau queue under the lock. */
958	stx	%o0, [%g3 + MAU_QUEUE + MQ_CPU_PID]	! interrupt target strand
959	stx	%o1, [%g3 + MAU_QUEUE + MQ_BASE_RA]	! guest's real address
960	stx	%g2, [%g3 + MAU_QUEUE + MQ_BASE]	! physical base
961	stx	%g2, [%g3 + MAU_QUEUE + MQ_HEAD]	! head = tail = base
962	stx	%g2, [%g3 + MAU_QUEUE + MQ_TAIL]
963	add	%g2, %g4, %g2
964	stx	%g2, [%g3 + MAU_QUEUE + MQ_END]
965	stx	%o2, [%g3 + MAU_QUEUE + MQ_NENTRIES]
966	st	%g0, [%g3 + MAU_QUEUE + MQ_BUSY]
967	stx	%g0, [%g3 + MAU_QUEUE + MQ_HEAD_MARKER]
968	mov	NCS_QSTATE_CONFIGURED, %g1
969	st	%g1, [%g3 + MAU_QUEUE + MQ_STATE]
970
971	ldx	[%g3 + MAU_HANDLE], %o1	! return queue handle in %o1
972
973	MAU_LOCK_EXIT_L(%g5)
974
975	HCALL_RET(EOK)
976
977.m_qconf_noraddr:
978	MAU_LOCK_EXIT_L(%g5)
979
980	HCALL_RET(ENORADDR)
981
982.m_qconf_unconfig:
983
984	MAU_HANDLE2ID_VERIFY(%o1, herr_inval, %g2)	! %g2 = mau-id from handle
985	GUEST_MID_GETMAU(%g1, %g2, %g3)
986	brz,pn	%g3, herr_noaccess	! guest has no access to that MAU
987	nop
988
989	MAU_LOCK_ENTER(%g3, %g5, %g1, %g6)
990
991	ld	[%g3 + MAU_QUEUE + MQ_BUSY], %g4
992	brnz,pn	%g4, .m_qconf_wouldblock	! queue has work in flight
993	nop
994
995	MAU_CLEAR_QSTATE(%g3)
996	mov	NCS_QSTATE_UNCONFIGURED, %g1
997	st	%g1, [%g3 + MAU_QUEUE + MQ_STATE]
998
999	MAU_LOCK_EXIT_L(%g5)
1000
1001	HCALL_RET(EOK)
1002
1003.m_qconf_wouldblock:
1004	MAU_LOCK_EXIT_L(%g5)
1005
1006	HCALL_RET(EWOULDBLOCK)
1007
1008	SET_SIZE(ncs_qconf_mau)
1009
1010/*
1011 *-----------------------------------------------------------
1012 * Function: ncs_qinfo(uint64_t qhandle)
1013 * Arguments:
1014 * Input:
1015 * %o0 - queue handle
1016 * Output:
1017 * %o0 - EOK (on success),
1018 * EINVAL (on failure)
1019 * %o1 - queue type
1020 * %o2 - queue base real address
1021 * %o3 - number of queue entries
1022 *-----------------------------------------------------------
1023 */
1024	ENTRY_NP(hcall_ncs_qinfo)
1025
1026	GUEST_STRUCT(%g1)
1027
1028	HANDLE_IS_MAU(%o0, %g2)
1029	bne	%xcc, 0f	! not a MAU handle: try CWQ path
1030	nop
1031
1032	MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau-id
1033	GUEST_MID_GETMAU(%g1, %g2, %g3)
1034	brz,pn	%g3, herr_inval	! guest has no access to that MAU
1035	nop
1036
1037	MAU_LOCK_ENTER(%g3, %g2, %g5, %g6)
1038
	/* Return (type, base RA, nentries) under the queue lock. */
1039	mov	NCS_QTYPE_MAU, %o1
1040	ldx	[%g3 + MAU_QUEUE + MQ_BASE_RA], %o2
1041	ldx	[%g3 + MAU_QUEUE + MQ_NENTRIES], %o3
1042
1043	MAU_LOCK_EXIT_L(%g2)
1044
1045	HCALL_RET(EOK)
1046
10470:
1048	HCALL_NCS_QINFO_CWQ()
1049
1050	SET_SIZE(hcall_ncs_qinfo)
1051
1052/*
1053 *-----------------------------------------------------------
1054 * Function: ncs_gethead(uint64_t qhandle)
1055 * Arguments:
1056 * Input:
1057 * %o0 - queue handle
1058 * Output:
1059 * %o0 - EOK (on success),
1060 * EINVAL (on failure)
1061 * %o1 - queue head offset
1062 *-----------------------------------------------------------
1063 */
1064	ENTRY_NP(hcall_ncs_gethead)
1065
1066	VCPU_GUEST_STRUCT(%g7, %g1)
1067
1068	HANDLE_IS_MAU(%o0, %g2)
1069	bne	%xcc, 0f	! not a MAU handle: try CWQ path
1070	nop
1071
1072	MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau-id
1073	GUEST_MID_GETMAU(%g1, %g2, %g3)
1074	brz,pn	%g3, herr_inval	! guest has no access to that MAU
1075	nop
1076
1077	MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)
1078
	/* Head offset returned to guest = mq_head - mq_base. */
1079	ldx	[%g3 + MAU_QUEUE + MQ_BASE], %g1
1080	ldx	[%g3 + MAU_QUEUE + MQ_HEAD], %g2
1081	sub	%g2, %g1, %o1
1082
1083	MAU_LOCK_EXIT_L(%g5)
1084
1085	HCALL_RET(EOK)
1086
10870:
1088
1089	HCALL_NCS_GETHEAD_CWQ()
1090
1091	SET_SIZE(hcall_ncs_gethead)
1092
1093/*
1094 *-----------------------------------------------------------
1095 * Function: ncs_gettail(uint64_t qhandle)
1096 * Arguments:
1097 * Input:
1098 * %o0 - queue handle
1099 * Output:
1100 * %o0 - EOK (on success),
1101 * EINVAL (on failure)
1102 * %o1 - queue tail offset
1103 *-----------------------------------------------------------
1104 */
1105	ENTRY_NP(hcall_ncs_gettail)
1106
1107	VCPU_GUEST_STRUCT(%g7, %g1)
1108
1109	HANDLE_IS_MAU(%o0, %g2)
1110	bne	%xcc, 0f	! not a MAU handle: try CWQ path
1111	nop
1112
1113	MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau-id
1114	GUEST_MID_GETMAU(%g1, %g2, %g3)
1115	brz,pn	%g3, herr_inval	! guest has no access to that MAU
1116	nop
1117
1118	MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)
1119
	/* Tail offset returned to guest = mq_tail - mq_base. */
1120	ldx	[%g3 + MAU_QUEUE + MQ_BASE], %g1
1121	ldx	[%g3 + MAU_QUEUE + MQ_TAIL], %g2
1122	sub	%g2, %g1, %o1
1123
1124	MAU_LOCK_EXIT_L(%g5)
1125
1126	HCALL_RET(EOK)
1127
11280:
1129
1130	HCALL_NCS_GETTAIL_CWQ()
1131
1132	SET_SIZE(hcall_ncs_gettail)
1133
1134/*
1135 *-----------------------------------------------------------
1136 * Function: ncs_qhandle_to_devino(uint64_t qhandle)
1137 * Arguments:
1138 * Input:
1139 * %o0 - queue handle
1140 * Output:
1141 * %o0 - EOK (on success),
1142 * EINVAL (on failure)
1143 * %o1 - devino
1144 *-----------------------------------------------------------
1145 */
1146	ENTRY_NP(hcall_ncs_qhandle_to_devino)
1147
1148	GUEST_STRUCT(%g1)
1149
1150	HANDLE_IS_MAU(%o0, %g2)
1151	bne	%xcc, 0f	! not a MAU handle: try CWQ path
1152	nop
1153
1154	MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau-id
1155	GUEST_MID_GETMAU(%g1, %g2, %g3)
1156	brz,pn	%g3, herr_inval	! guest has no access to that MAU
1157	nop
1158
1159	ldx	[%g3 + MAU_INO], %o1	! return the MAU's devino
1160
1161	HCALL_RET(EOK)
1162
11630:
1164	HCALL_NCS_QHANDLE_TO_DEVINO_CWQ()
1165
1166	SET_SIZE(hcall_ncs_qhandle_to_devino)
1167
1168/*
1169 *-----------------------------------------------------------
1170 * Function: ncs_sethead_marker(uint64_t qhandle, uint64_t new_headoffset)
1171 * Arguments:
1172 * Input:
1173 * %o0 - queue handle
1174 * %o1 - new head offset
1175 * Output:
1176 * %o0 - EOK (on success),
1177 * EINVAL, ENORADDR (on failure)
1178 *-----------------------------------------------------------
1179 */
1180	ENTRY_NP(hcall_ncs_sethead_marker)
1181
1182	GUEST_STRUCT(%g1)
1183
1184	HANDLE_IS_MAU(%o0, %g2)
1185	bne	%xcc, 0f	! not a MAU handle: try CWQ path
1186	nop
1187
1188	MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)	! %g2 = mau-id
1189	GUEST_MID_GETMAU(%g1, %g2, %g3)
1190	brz,pn	%g3, herr_inval	! guest has no access to that MAU
1191	nop
1192
1193	btst	NCS_HVDESC_SIZE - 1, %o1	! offset must be descriptor-aligned
1194	bnz,a,pn %xcc, herr_inval
1195	nop
1196
1197	MAU_LOCK_ENTER(%g3, %g5, %g2, %g6)
1198
	/* Convert offset to an absolute pointer and bounds-check it. */
1199	ldx	[%g3 + MAU_QUEUE + MQ_BASE], %g1
1200	add	%g1, %o1, %g1
1201	ldx	[%g3 + MAU_QUEUE + MQ_END], %g2
1202	cmp	%g1, %g2
1203	blu,a,pn %xcc, 1f
1204	stx	%g1, [%g3 + MAU_QUEUE + MQ_HEAD_MARKER]	! (annulled: in-range only)
1205
1206	MAU_LOCK_EXIT_L(%g5)
1207
1208	HCALL_RET(EINVAL)
1209
12101:
1211	MAU_LOCK_EXIT_L(%g5)
1212
1213	HCALL_RET(EOK)
1214
12150:
1216	HCALL_NCS_SETHEAD_MARKER_CWQ()
1217
1218	SET_SIZE(hcall_ncs_sethead_marker)
1219
1220/*
1221 *-----------------------------------------------------------
1222 * Function: ncs_settail(uint64_t qhandle, uint64_t new_tailoffset)
1223 * Arguments:
1224 * Input:
1225 * %o0 - queue handle
1226 * %o1 - new tail offset
1227 * Output:
1228 * %o0 - EOK (on success),
1229 * EINVAL, ENORADDR (on failure)
1230 *-----------------------------------------------------------
1231 */
1232	ENTRY_NP(hcall_ncs_settail)
1233
1234	VCPU_GUEST_STRUCT(%g7, %g1)	! %g7 = vcpu, %g1 = guest
1235
	/* Dispatch on handle type: MAU here, CWQ elsewhere. */
1236	HANDLE_IS_MAU(%o0, %g2)
1237	be	%xcc, ncs_settail_mau
1238	nop
1239
1240	HANDLE_IS_CWQ_BRANCH(%o0, %g2, ncs_settail_cwq)
1241
1242	HCALL_RET(EINVAL)	! unrecognized queue handle
1243
1244	SET_SIZE(hcall_ncs_settail)
1245
1246/*
1247 *-----------------------------------------------------------
1248 * Function: ncs_settail_mau(uint64_t qhandle, uint64_t new_tailoffset)
1249 * Arguments:
1250 * Input:
1251 * %o0 - queue handle
1252 * %o1 - new tail offset
1253 * %g1 - guest struct
1254 * %g7 - cpu struct
1255 * Output:
1256 * %o0 - EOK (on success),
1257 * EINVAL, ENORADDR (on failure)
1258 *-----------------------------------------------------------
1259 */
	ENTRY_NP(ncs_settail_mau)

	MAU_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)
	GUEST_MID_GETMAU(%g1, %g2, %g3)		! %g3 = mau struct (or NULL)
	brz,pn	%g3, herr_inval
	nop
	/*
	 * Verify that we're on the MAU that the
	 * caller specified.
	 */
	ldx	[%g7 + CPU_MAU], %g4
	cmp	%g4, %g3
	bne,pn	%xcc, herr_inval
	nop

	/*
	 * The new tail offset must be descriptor-aligned and the
	 * resulting address must fall within [mq_base, mq_end).
	 */
	btst	NCS_HVDESC_SIZE - 1, %o1
	bnz,a,pn %xcc, herr_inval
	nop

	ldx	[%g3 + MAU_QUEUE + MQ_BASE], %g1
	add	%g1, %o1, %g1			! %g1 = new (absolute) mq_tail
	ldx	[%g3 + MAU_QUEUE + MQ_END], %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, herr_inval
	nop

	MAU_LOCK_ENTER(%g3, %g5, %g4, %g6)

	/*
	 * Update MQ_BUSY to indicate we're going to have work
	 * pending. If the current MQ_BUSY is non-zero then
	 * that indicates that queue has jobs and is being
	 * managed asynchronously (via mau_intr).
	 */
	mov	-1, %g4
	ld	[%g3 + MAU_QUEUE + MQ_BUSY], %g2
	st	%g4, [%g3 + MAU_QUEUE + MQ_BUSY]
	brz,pt	%g2, .st_mau_dowork		! queue was idle: process it here
	ldx	[%g3 + MAU_QUEUE + MQ_HEAD], %g2 ! (delay slot) %g2 = mq_head
	/*
	 * Queue already busy indicating queue is being
	 * actively managed by interrupt handler. So,
	 * all we have to do is insert job at tail and
	 * we're done.  If head == tail the handler may
	 * already have drained the queue, so fall back
	 * to processing the work ourselves.
	 */
	ldx	[%g3 + MAU_QUEUE + MQ_TAIL], %g4
	cmp	%g2, %g4
	be	%xcc, .st_mau_dowork
	nop

	stx	%g1, [%g3 + MAU_QUEUE + MQ_TAIL]
	MAU_LOCK_EXIT_L(%g5)
	ba	hret_ok
	nop

.st_mau_dowork:

	stx	%g1, [%g3 + MAU_QUEUE + MQ_TAIL]
	!!
	!! %g2 = mq_head
	!! %g1 = mq_tail
	!!
	/*
	 * Use the per-MAU assigned cpu as target
	 * for interrupts for this job.
	 */
	ldx	[%g3 + MAU_QUEUE + MQ_CPU_PID], %g7
	and	%g7, NSTRANDS_PER_MAU_MASK, %g7	! pid -> hw tid
	!!
	!! %g7 = hw thread-id
	!!

.st_mau_loop:
	cmp	%g2, %g1			! mq_head == mq_tail?
	be,pn	%xcc, .st_mau_done
	nop
	/*
	 * Mark current descriptor busy.
	 */
	mov	ND_STATE_BUSY, %o0
	stx	%o0, [%g2 + NHD_STATE]		! nhd_state = BUSY
	add	%g2, NHD_REGS, %g2
	!!
	!! %g2 = ncs_hvdesc.nhd_regs
	!! %g7 = hw thread-id
	!!
	/*
	 * NOTE(review): judging by label usage, MAU_LOAD branches to
	 * .st_mau_addr_err on a bad queue address, branches to
	 * .st_mau_chk_rv with a status in %o0 on synchronous
	 * completion, and falls through once the job is handed to
	 * the MAU hardware -- confirm against the MAU_LOAD macro
	 * definition.
	 */
	MAU_LOAD(%g2, %g7, %o0, 1, .st_mau_addr_err, .st_mau_chk_rv, %o1, %o5, %g4)
	/*
	 * We're done. The rest will be handled by MAU
	 * interrupt handler. Leave MQ_BUSY set.
	 */
	MAU_LOCK_EXIT(%g3, %g5)

	ba	hret_ok
	nop

.st_mau_chk_rv:
	/*
	 * Determine appropriate state to set descriptor to:
	 * DONE on success (%o0 == 0), ERROR otherwise.
	 */
	mov	ND_STATE_DONE, %o5
	movrnz	%o0, ND_STATE_ERROR, %o5

.st_mau_set_state:
	sub	%g2, NHD_REGS, %g2
	!!
	!! %g2 = &ncs_hvdesc
	!!
	stx	%o5, [%g2 + NHD_STATE]

	ldx	[%g2 + NHD_TYPE], %o1
	and	%o1, ND_TYPE_END, %o1		! %o1 != 0 iff last desc of a job

	add	%g2, NCS_HVDESC_SIZE, %g2	! mq_head++
	ldx	[%g3 + MAU_QUEUE + MQ_END], %g5
	ldx	[%g3 + MAU_QUEUE + MQ_BASE], %g4
	cmp	%g2, %g5			! mq_head == mq_end?
	movgeu	%xcc, %g4, %g2			! mq_head = mq_base (wrap around)
	stx	%g2, [%g3 + MAU_QUEUE + MQ_HEAD]
	/*
	 * If previous descriptor was not in error or was the
	 * last one in a job, then check the next descriptor
	 * for normal processing.
	 */
	brnz,pn	%o1, .st_mau_loop		! last descriptor?
	cmp	%o5, ND_STATE_ERROR		! (delay slot) cond for bne below
	bne,pn	%xcc, .st_mau_loop
	nop
	/*
	 * If we reach here then we encountered an
	 * error on a descriptor within the middle
	 * of a job. Need to pop the entire job
	 * off the queue. We keep popping descriptors
	 * off until we either hit the Last one or
	 * hit the Tail of the queue.
	 * Note that we set state in all remaining
	 * descriptors in job to Error (ND_STATE_ERROR).
	 */
	!!
	!! %o5 = ND_STATE_ERROR
	!!
	cmp	%g2, %g1			! queue empty?
	bne,pt	%xcc, .st_mau_set_state
	add	%g2, NHD_REGS, %g2		! (delay slot) advance to nhd_regs

.st_mau_done:
	st	%g0, [%g3 + MAU_QUEUE + MQ_BUSY]

	MAU_LOCK_EXIT(%g3, %g5)

	ba	hret_ok
	nop

.st_mau_addr_err:
	ba	.st_mau_chk_rv
	mov	ENORADDR, %o0			! (delay slot) report bad address

	SET_SIZE(ncs_settail_mau)