Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / huron / src / cwq.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: cwq.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)cwq.s 1.2 07/05/21 SMI"
50
51 .file "cwq.s"
52
53/*
54 * Niagara2 CWQ support
55 */
56
57#include <sys/asm_linkage.h>
58#include <sys/htypes.h>
59#include <hypervisor.h>
60#include <sparcv9/misc.h>
61#include <sparcv9/asi.h>
62#include <asi.h>
63#include <mmu.h>
64#include <sun4v/traps.h>
65#include <sun4v/asi.h>
66#include <sun4v/mmu.h>
67#include <sun4v/queue.h>
68#include <devices/pc16550.h>
69
70#include <debug.h>
71#include <config.h>
72#include <guest.h>
73#include <cwq.h>
74#include <mau.h>
75#include <md.h>
76#include <abort.h>
77#include <offsets.h>
78#include <ncs.h>
79#include <util.h>
80
81/*
82 *-----------------------------------------------------------
83 * Function: setup_cwq
84 * Called via setup_cpu() if the given cpu has access
85 * to a mau. If the handle is non-NULL then the mau
86 * struct has already been initialized.
87 * Arguments:
88 * Input:
89 * %i0 - CONFIG
90 * %g1 - cpu struct
91 * %g2 - ino
92 * %g7 - return address
93 * Output:
94 * %g1 - &config.cwqs[cwq-id] or NULL (0) if error.
95 *
96 * Uses: %g1-%g6,%l3
97 *-----------------------------------------------------------
98 */
99
100 ENTRY_NP(setup_cwq)
101
102 ldx [%g1 + CPU_CWQ], %g3 ! %g3 = this cpu's cwq struct (may be NULL)
103 brz,pn %g3, 1f ! no CWQ on this cpu -> return NULL
104 nop
105 ldx [%g3 + CWQ_PID], %g6
106 cmp %g6, NCWQS ! sanity-check cwq id
107 bgeu,a,pn %xcc, 1f
108 mov %g0, %g3 ! (annulled slot) bad id -> return NULL
109
110 VCPU2STRAND_STRUCT(%g1, %g5)
111 ldub [%g5 + STRAND_ID], %g5
112 and %g5, NSTRANDS_PER_CWQ_MASK, %g5 ! %g5 = hw thread-id
113 mov 1, %g4
114 sllx %g4, %g5, %g4 ! %g4 = cpuset bit for this hw thread
115 ldx [%g3 + CWQ_CPUSET], %g6
116 btst %g4, %g6
117 bnz,pn %xcc, 1f ! already registered in cpuset -> done
118 nop
119 bset %g4, %g6
120 stx %g6, [%g3 + CWQ_CPUSET]
121
122 add %g5, CWQ_CPU_ACTIVE, %g5
123 mov -1, %g6
124 stb %g6, [%g3 + %g5] ! mark this hw thread active (-1 == true)
125
126 ldx [%g3 + CWQ_CPUSET], %g5
127 cmp %g4, %g5 ! 1st (only) cpu?
128 bne,pt %xcc, 1f
129 nop
	/*
	 * First cpu to attach to this cwq: do the one-time
	 * handle/ino/queue/interrupt initialization.
	 */
130
131 ldx [%g3 + CWQ_PID], %g6
132 ID2HANDLE(%g6, CWQ_HANDLE_SIG, %g6)
133 stx %g6, [%g3 + CWQ_HANDLE]
134 stx %g2, [%g3 + CWQ_INO]
135 /*
136 * Now set up cwq queue.
137 */
138 CWQ_CLEAR_QSTATE(%g3)
139 /*
140 * Now set up interrupt stuff.
141 */
142 ldx [%g1 + CPU_GUEST], %g1
143 LABEL_ADDRESS(cwq_intr_getstate, %g4)
144 mov %g0, %g5
145 !!
146 !! %g1 = guestp
147 !! %g2 = ino
148 !! %g3 = &config.cwqs[cwq-id]
149 !! %g4 = cwq_intr_getstate
150 !! %g5 = NULL (no setstate callback)
151 !! %g7 = return pc (set up in setup_cpu())
152 !!
153 /*
154 * Note that vdev_intr_register() clobbers %g1,%g3,%g5-%g7.
155 */
156 mov %g7, %l3 ! save return pc
157 HVCALL(vdev_intr_register)
158 stx %g1, [%g3 + CWQ_IHDLR + CI_COOKIE] ! %g1 = cookie from vdev_intr_register
159
160 mov CWQ_STATE_RUNNING, %g2
161 stx %g2, [%g3 + CWQ_STATE]
162
163 mov %l3, %g7 ! restore return pc
1641:
165 mov %g3, %g1 ! return &cwqs[cwq-id]
166 HVRET
167
168 SET_SIZE(setup_cwq)
169
170/*
171 * Wrapper around setup_cwq, so it can be called from C
172 * SPARC ABI requires only that g2,g3,g4 are preserved across
173 * function calls.
174 * %g1 - cpu struct
175 * %g2 - ino
176 * %g3 - config
177 * Output:
178 * %g1 - &config.cwqs[cwq-id] or NULL (0) if error.
179 *
180 * cwqp = c_setup_cwq(vcpup, ino, &config);
181 *
182 */
183
184 ENTRY(c_setup_cwq)
185
	/*
	 * Save %g2-%g4 on the per-strand stack: per the comment above,
	 * these are the only globals the C caller expects preserved.
	 */
186 STRAND_PUSH(%g2, %g6, %g7)
187 STRAND_PUSH(%g3, %g6, %g7)
188 STRAND_PUSH(%g4, %g6, %g7)
189
	/* marshal the C (%o) args into the globals setup_cwq() expects */
190 mov %o0, %g1 ! %g1 = vcpu struct
191 mov %o1, %g2 ! %g2 = ino
192 mov %o2, %g3 ! %g3 = config
193 HVCALL(setup_cwq)
194 mov %g1, %o0 ! C return: &cwqs[cwq-id] or NULL
195
	/* restore in reverse push order */
196 STRAND_POP(%g4, %g6)
197 STRAND_POP(%g3, %g6)
198 STRAND_POP(%g2, %g6)
199
200 retl
201 nop
202 SET_SIZE(c_setup_cwq)
203
204/*
205 *-----------------------------------------------------------
206 * Function: cwq_intr()
207 * Called from within trap context.
208 * Arguments:
209 * Input:
210 * %g1 - cpu struct
211 * Output:
212 *-----------------------------------------------------------
213 */
214 ENTRY_NP(cwq_intr)
215
216 ldx [%g1 + CPU_CWQ], %g2 ! %g2 = this cpu's cwq struct
217 brz,pn %g2, .ci_exit_nolock ! no CWQ -> nothing to do
218 nop
219
220 CWQ_LOCK_ENTER(%g2, %g5, %g4, %g6)
221
222 ldx [%g2 + CWQ_STATE], %g3
223 cmp %g3, CWQ_STATE_RUNNING
224 bne,pn %xcc, .ci_exit ! ignore interrupts unless running
225 nop
226
227 /*
228 * Read CSR (contains error info) and check for errors.
229 * If there were no errors, set bit 50 of all finished
230 * Control Words to 1 indicating that the hardware has finished
231 * processing of these Control Words.
232 * If there were errors we'll need to do some work.
233 * We set bit 50 to 1, and copy the CSR error indicator
234 * bits hwe and protocolerror to bits 52 and 51, resp.
235 * of the Control Word causing the error and each following
236 * Control Word of the Control Word Block it belongs to.
237 */
238
239 mov ASI_SPU_CWQ_CSR, %g4
240 ldxa [%g4]ASI_STREAM, %g7 ! %g7 = CWQ CSR (status/error bits)
241 mov ASI_SPU_CWQ_HEAD, %g4
242 ldxa [%g4]ASI_STREAM, %g3 ! %g3 = hardware Head
243
244
245 ldx [%g2 + CWQ_QUEUE + CQ_DR_HEAD], %g4 ! %g4 = driver-view head
246 ldx [%g2 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g6
247 sub %g4, %g6, %g5 ! convert driver head to HV address space
248 cmp %g5, %g3
249 be,pn %xcc, .ci_exit ! phantom interrupt, we already
250 nop ! processed this CW
251
252 andcc %g7, CWQ_CSR_ERROR, %g7
253 bz,pt %xcc, .ci_no_error
254 nop
255
256.ci_still_good_loop:
257 /*
258 * %g7 interrupt bits
259 * %g4 driver's first non-processed CW entry
260 * %g3 HV's CWQ head
261 */
262 ldx [%g2 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g6
263 sub %g4, %g6, %g6
264 cmp %g6, %g3
265 be,pn %xcc, .ci_error ! reached the erroring CW
266 nop
267
	/* mark this CW done: set the result bit (bit 50) */
268 ldx [%g4], %g5
269 mov 1, %g6
270 sllx %g6, CW_RES_SHIFT, %g6
271 or %g6, %g5, %g5
272 stx %g5, [%g4]
273
	/* advance with wrap-around: Last -> First */
274 add %g4, CWQ_CW_SIZE, %g5
275 ldx [%g2 + CWQ_QUEUE + CQ_DR_LAST], %g6
276 cmp %g5, %g6 ! next == Last?
277 ldx [%g2 + CWQ_QUEUE + CQ_DR_BASE], %g6
278 movgu %xcc, %g6, %g5 ! next = First
279 ba .ci_still_good_loop
280 mov %g5, %g4
281
282.ci_error:
283
284 /*
285 * at this point, we have processed everything up to
286 * cwq[head - 1]. and have an error at cwq[head]
287 */
288
289 mov %g5, %g3
290 ldx [%g2 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g5
291 mov ASI_SPU_CWQ_TAIL, %g4
292 ldxa [%g4]ASI_STREAM, %g4
293 add %g4, %g5, %g4 ! convert hw Tail to driver address space
294 stx %g4, [%g2 + CWQ_QUEUE + CQ_DR_TAIL] ! tail
295
296 /*
297 * CWQ state will be:
298 * - Head = CW in error (within CW Block)
299 * - CWQ = disabled
300 * We'll mark all remaining CWs in this CW Block
301 * with the error bit then move Head to next
302 * CW Block and reenable CWQ. Note that we will
303 * not move the Head beyond the Tail. In theory this
304 * should never happen, but since we rely on software
305 * to properly set the EOB bit we have to guard against
306 * it not being set and this code being stuck in an
307 * infinite loop.
308 */
309
	/* %g6 = error bits (hwe/protocolerror) shifted down, plus done bit */
310 srl %g7, CWQ_CSR_ERROR_SHIFT - 1, %g6
311 or %g6, 1, %g6
312
313 ldx [%g3], %g5
314
315
316.ci_chkeob:
	/* tag this CW with done+error bits */
317 sllx %g6, CW_RES_SHIFT, %g7
318 or %g5, %g7, %g5
319 stx %g5, [%g3]
320
321 srlx %g5, CW_EOB_SHIFT, %g5
322 btst CW_EOB_MASK, %g5
323 bnz,pt %xcc, .ci_finalcw ! end of CW Block reached
324 nop
325
326 add %g3, CWQ_CW_SIZE, %g3
327 ldx [%g2 + CWQ_QUEUE + CQ_DR_LAST], %g7
328 cmp %g3, %g7 ! current == Last?
329 ldx [%g2 + CWQ_QUEUE + CQ_DR_BASE], %g7
330 movgu %xcc, %g7, %g3 ! current = First
331 ldx [%g2 + CWQ_QUEUE + CQ_DR_TAIL], %g7
332 cmp %g3, %g7 ! current == Tail?
333 bne,pt %xcc, .ci_chkeob
334 ldx [%g3], %g5 ! (delay slot) load next CW
335
336.ci_finalcw:
337 /*
338 * Move the Head to the next CW.
339 */
340 add %g3, CWQ_CW_SIZE, %g3
341 ldx [%g2 + CWQ_QUEUE + CQ_DR_LAST], %g7
342 cmp %g3, %g7
343 ldx [%g2 + CWQ_QUEUE + CQ_DR_BASE], %g7
344 movgu %xcc, %g7, %g3 ! wrap to First if past Last
345 stx %g3, [%g2 + CWQ_QUEUE + CQ_DR_HEAD]
346 ldx [%g2 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g5
347 sub %g3, %g5, %g3 ! convert back to HV address space
348 mov ASI_SPU_CWQ_HEAD, %g4
349 stxa %g3, [%g4]ASI_STREAM ! push new Head to hardware
350 stx %g3, [%g2 + CWQ_QUEUE + CQ_HEAD]
351
352 /*
353 * Clear the CWQ state and reenable the CWQ.
354 */
355
356#ifdef ERRATA_192
	/*
	 * Errata workaround: if an MAU store is in progress, defer
	 * the CWQ enable to the MAU code via MAU_ENABLE_CWQ.
	 */
357 ldx [%g1 + CPU_MAU], %g6
358 MAU_LOCK_ENTER(%g6, %g1, %g4, %o0)
359 ldx [%g6 + MAU_STORE_IN_PROGR], %g4
360 sub %g4, 1, %g4
361 brnz,pt %g4, .ci_do_enable
362 nop
363 mov 1, %g4
364 stx %g4, [%g6 + MAU_ENABLE_CWQ]
365 ba .ci_cwq_enable_done
366 nop
367.ci_do_enable:
368#endif
369
370 mov ASI_SPU_CWQ_CSR, %g4
371 mov CWQ_CSR_ENABLED, %g6
372 stxa %g6, [%g4]ASI_STREAM ! clear errors + reenable CWQ
373
374#ifdef ERRATA_192
375.ci_cwq_enable_done:
376 MAU_LOCK_EXIT_L(%g1)
377#endif
378
379 ba .ci_finish
380 nop
381
382.ci_no_error:
	/*
	 * NOTE(review): at this point %g6 holds CQ_DR_HV_OFFSET
	 * (loaded above), while the CSR value was consumed into %g7
	 * by the CWQ_CSR_ERROR test. This BUSY test therefore looks
	 * like it operates on the wrong register -- verify against
	 * the original intent before relying on the busy-backoff path.
	 */
383 andcc %g6, CWQ_CSR_BUSY, %g6
384 bz,pt %xcc, .ci_no_error_loop
385 mov %g0, %g7 ! collect interrupt bits in %g7
386
387 /*
388 * if the busy bit was set in the CSR, we leave the last entry for
389 * the next invocation of the interrupt handler, as the result of
390 * the last CW might not be globally visible yet
391 */
392 sub %g3, CWQ_CW_SIZE, %g3
393 ldx [%g2 + CWQ_QUEUE + CQ_BASE], %g6
394 cmp %g6, %g3 ! previous < First?
395 ldx [%g2 + CWQ_QUEUE + CQ_LAST], %g6
396 movgu %xcc, %g6, %g3 ! previous = Last
397
398.ci_no_error_loop:
399 /*
400 * %g7 interrupt bits
401 * %g4 driver's first non-processed CW entry
402 * %g3 HV's CWQ head
403 */
404 ldx [%g2 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g6
405 sub %g4, %g6, %g6
406 cmp %g6, %g3
407 be,pn %xcc, .ci_no_error_done
408 nop
409
410 ldx [%g4], %g5
411 or %g5, %g7, %g7 !collecting the intr bits
412 mov 1, %g6
413 sllx %g6, CW_RES_SHIFT, %g6
414 or %g6, %g5, %g5
415 stx %g5, [%g4] ! mark CW done (bit 50)
416
	/* advance with wrap-around: Last -> First */
417 add %g4, CWQ_CW_SIZE, %g5
418 ldx [%g2 + CWQ_QUEUE + CQ_DR_LAST], %g6
419 cmp %g5, %g6 ! next == Last?
420 ldx [%g2 + CWQ_QUEUE + CQ_DR_BASE], %g6
421 movgu %xcc, %g6, %g5 ! next = First
422 ba .ci_no_error_loop
423 mov %g5, %g4
424
425.ci_no_error_done:
426
	/* record new head in both driver and HV address spaces */
427 stx %g4, [%g2 + CWQ_QUEUE + CQ_DR_HEAD]
428 ldx [%g2 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g3
429 sub %g4, %g3, %g3
430 stx %g3, [%g2 + CWQ_QUEUE + CQ_HEAD]
431 mov %g7, %g5
432
433.ci_finish:
434 srlx %g7, CW_INTR_SHIFT, %g7
435 and %g7, CW_INTR_MASK, %g7 ! %g7 = 1 if any CW requested an interrupt
436
437 ldx [%g2 + CWQ_IHDLR + CI_COOKIE], %g1
438 brz,pn %g1, .ci_exit ! no registered handler -> just exit
439 nop
440
441 CWQ_LOCK_EXIT(%g2, %g6)
442
443 brz,pt %g7, .ci_exit_nolock ! don't generate interrupt
444 nop
445
446 HVCALL(vdev_intr_generate) ! forward interrupt to the guest
447 retry
448
449.ci_exit:
450 CWQ_LOCK_EXIT(%g2, %g5)
451
452.ci_exit_nolock:
453 retry
454
455 SET_SIZE(cwq_intr)
456
457
458/*
459 *-----------------------------------------------------------
460 * Function: cwq_intr_getstate()
461 * Arguments:
462 * Input:
463 * %g1 - cwqs[] struct
464 * %g7 - return pc
465 * Output:
466 *-----------------------------------------------------------
467 */
468 ENTRY_NP(cwq_intr_getstate)
469
470 /*
471 * Note that ideally we would get the actual Head
472 * from the hardware, however there is no guarantee
473 * that this routine will be called on the target
474 * core/cwq, so we have to rely on the Head being
475 * captured during cwq_intr time.
476 */
477 mov %g0, %g2 ! assume idle (state = 0)
478 ldx [%g1 + CWQ_QUEUE + CQ_HEAD], %g3
479 ldx [%g1 + CWQ_QUEUE + CQ_HEAD_MARKER], %g4
480 cmp %g3, %g4
481 movne %xcc, 1, %g2 ! head != marker -> work pending (state = 1)
482 jmp %g7 + SZ_INSTR
483 mov %g2, %g1 ! (delay slot) return state in %g1
484
485 SET_SIZE(cwq_intr_getstate)
486
487/*
488 *-----------------------------------------------------------
489 * Function: ncs_qconf_cwq
490 * Arguments:
491 * Input:
492 * %o1 - base real address of queue or queue handle if
493 * unconfiguring a queue.
494 * %o2 - number of entries in queue.
495 * %g1 - guest struct
496 * %g2 - cpu struct
497 * Output:
498 * %o0 - EOK (on success),
499 * EINVAL, ENOACCESS, EBADALIGN,
500 * ENORADDR, EWOULDBLOCK (on failure)
501 * %o1 - queue handle for respective queue.
502 *-----------------------------------------------------------
503 */
504 ENTRY_NP(ncs_qconf_cwq)
505
506 VCPU_GUEST_STRUCT(%g2, %g1)
507
508 ldx [%g2 + CPU_CWQ], %g3 ! %g3 = this cpu's cwq struct
509#ifdef ERRATA_192
510 ldx [%g2 + CPU_MAU], %g7
511#endif
512 brz,pn %o2, .c_qconf_unconfig ! nentries == 0 means unconfigure
513 nop
514
	/* validate nentries range */
515 cmp %o2, NCS_MIN_CWQ_NENTRIES
516 blu,pn %xcc, herr_inval
517 nop
518 cmp %o2, NCS_MAX_CWQ_NENTRIES
519 bgu,pn %xcc, herr_inval
520 nop
521 /*
522 * Check that #entries is a power of two.
523 */
524 sub %o2, 1, %g4
525 andcc %o2, %g4, %g0
526 bnz,pn %xcc, herr_inval
527 nop
528
529 brz,pn %g3, herr_noaccess ! no CWQ on this cpu
530 nop
531 /*
532 * The cpu that does the queue configure will also
533 * be the one targeted for all the interrupts for
534 * this cwq. We need to effectively single thread
535 * the interrupts per-cwq because the interrupt handler
536 * updates global per-cwq data structures.
537 */
538 VCPU2STRAND_STRUCT(%g2, %o0)
539 ldub [%o0 + STRAND_ID], %o0 ! %o0 = target strand id
540 /*
541 * Make sure base address is size aligned.
542 */
543 sllx %o2, CWQ_CW_SHIFT, %g4 ! %g4 = queue size in bytes
544 sub %g4, 1, %g2
545 btst %g2, %o1
546 bnz,pn %xcc, herr_badalign
547 nop
548
549 CWQ_LOCK_ENTER(%g3, %g5, %g2, %g6)
550 /*
551 * Translate base address from real to physical.
552 */
553 RA2PA_RANGE_CONV_UNK_SIZE(%g1, %o1, %g4, .c_qconf_noraddr, %g2, %g6)
554
	/* record driver-view queue geometry, then derive HV-view copy */
555 stx %o0, [%g3 + CWQ_QUEUE + CQ_CPU_PID]
556 stx %o1, [%g3 + CWQ_QUEUE + CQ_DR_BASE_RA]
557 stx %g6, [%g3 + CWQ_QUEUE + CQ_DR_BASE]
558 stx %g6, [%g3 + CWQ_QUEUE + CQ_DR_HEAD]
559 add %g3, CWQ_QUEUE + CQ_HV_CWS + CWQ_CW_SIZE - 1, %g2
560 and %g2, -CWQ_CW_SIZE, %g2 ! align HV CW array to CW size
561 stx %g2, [%g3 + CWQ_QUEUE + CQ_BASE]
562 stx %g2, [%g3 + CWQ_QUEUE + CQ_HEAD]
563 sub %g6, %g2, %g6
564 stx %g6, [%g3 + CWQ_QUEUE + CQ_DR_HV_OFFSET] ! driver - HV base delta
565 add %g2, %g4, %g6
566 sub %g6, CWQ_CW_SIZE, %g6
567 stx %g6, [%g3 + CWQ_QUEUE + CQ_LAST]
568 ldx [%g3 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %g4
569 add %g6, %g4, %g4
570 stx %g4, [%g3 + CWQ_QUEUE + CQ_DR_LAST]
571 stx %o2, [%g3 + CWQ_QUEUE + CQ_NENTRIES]
572 st %g0, [%g3 + CWQ_QUEUE + CQ_BUSY]
573 stx %g0, [%g3 + CWQ_QUEUE + CQ_HEAD_MARKER]
574 !!
575 !! %g2 = base
576 !! %g6 = (end - [1 cwq entry]) (last valid entry)
577 !!
578 /*
579 * Clear any errors and disable the CWQ.
580 */
581
582#ifdef ERRATA_192
583 MAU_LOCK_ENTER(%g7, %g1, %g4, %o0)
584#endif
585
586 mov ASI_SPU_CWQ_CSR, %g4
587 stxa %g0, [%g4]ASI_STREAM
588 /*
589 * Load up CWQ pointers.
590 * first = head = tail = %g2 (base)
591 * last = %g6 (end-1)
592 */
593 mov ASI_SPU_CWQ_FIRST, %g4
594 stxa %g2, [%g4]ASI_STREAM
595
596 mov ASI_SPU_CWQ_LAST, %g4
597 stxa %g6, [%g4]ASI_STREAM
598
599 mov ASI_SPU_CWQ_HEAD, %g4
600 stxa %g2, [%g4]ASI_STREAM
601
602 mov ASI_SPU_CWQ_TAIL, %g4
603 stxa %g2, [%g4]ASI_STREAM
604
605 /*
606 * First and Last have been set. Now ready to
607 * enable the SPU.
608 */
609
610#ifdef ERRATA_192
	/* defer enable to MAU code if an MAU store is in progress */
611 ldx [%g7 + MAU_STORE_IN_PROGR], %g4
612 sub %g4, 1, %g4
613 brnz,pt %g4, .c_qconf_do_enable
614 nop
615 mov 1, %g4
616 stx %g4, [%g7 + MAU_ENABLE_CWQ]
617 ba .c_qconf_done
618 nop
619.c_qconf_do_enable:
620#endif
621
622 mov CWQ_CSR_ENABLED, %o0
623 mov ASI_SPU_CWQ_CSR_ENABLE, %g4
624 stxa %o0, [%g4]ASI_STREAM
625
626#ifdef ERRATA_192
627.c_qconf_done:
628 MAU_LOCK_EXIT_L(%g1)
629#endif
630
631 mov NCS_QSTATE_CONFIGURED, %g1
632 st %g1, [%g3 + CWQ_QUEUE + CQ_STATE]
633
634 ldx [%g3 + CWQ_HANDLE], %o1 ! return queue handle to caller
635
636 CWQ_LOCK_EXIT_L(%g5)
637
638 HCALL_RET(EOK)
639
640.c_qconf_noraddr:
641 CWQ_LOCK_EXIT_L(%g5)
642
643 HCALL_RET(ENORADDR)
644
645.c_qconf_unconfig:
646
	/* %o1 is a queue handle on the unconfigure path */
647 CWQ_HANDLE2ID_VERIFY(%o1, herr_inval, %g2)
648 GUEST_CID_GETCWQ(%g1, %g2, %g3)
649 brz,pn %g3, herr_noaccess
650 nop
651
652 CWQ_LOCK_ENTER(%g3, %g5, %g1, %g6)
653
654 ld [%g3 + CWQ_QUEUE + CQ_BUSY], %g4
655 brnz,pn %g4, .c_qconf_wouldblock ! cannot unconfigure a busy queue
656 nop
657 /*
658 * Clear any errors and disable CWQ,
659 * then do a synchronous load to wait
660 * for any outstanding ops.
661 */
662
663#ifdef ERRATA_192
664 MAU_LOCK_ENTER(%g7, %g1, %g4, %o0)
665 stx %g0, [%g7 + MAU_ENABLE_CWQ]
666#endif
667
668 mov ASI_SPU_CWQ_CSR, %g4
669 stxa %g0, [%g4]ASI_STREAM
670 /*
671 * Wait for SPU to drain.
672 */
673 mov ASI_SPU_CWQ_SYNC, %g4
674 ldxa [%g4]ASI_STREAM, %g0
675
676#ifdef ERRATA_192
677 MAU_LOCK_EXIT_L(%g1)
678#endif
679
680 CWQ_CLEAR_QSTATE(%g3)
681 mov NCS_QSTATE_UNCONFIGURED, %g1
682 st %g1, [%g3 + CWQ_QUEUE + CQ_STATE]
683
684 CWQ_LOCK_EXIT_L(%g5)
685
686 HCALL_RET(EOK)
687
688.c_qconf_wouldblock:
689 CWQ_LOCK_EXIT_L(%g5)
690
691 HCALL_RET(EWOULDBLOCK)
692
693 SET_SIZE(ncs_qconf_cwq)
694
695/*
696 *-----------------------------------------------------------
697 * Function: ncs_settail_cwq(uint64_t qhandle, uint64_t new_tailoffset)
698 * Arguments:
699 * Input:
700 * %o0 - queue handle
701 * %o1 - new tail offset
702 * %g1 - guest struct
703 * %g7 - cpu struct
704 * Output:
705 * %o0 - EOK (on success),
706 * EINVAL, ENORADDR (on failure)
707 *-----------------------------------------------------------
708 */
709 ENTRY_NP(ncs_settail_cwq)
710
711 CWQ_HANDLE2ID_VERIFY(%o0, herr_inval, %g2)
712 GUEST_CID_GETCWQ(%g1, %g2, %g3)
713 brz,pn %g3, herr_inval
714 nop
715
716 /*
717 * Verify that we're on the CWQ that the
718 * caller specified.
719 */
720 ldx [%g7 + CPU_CWQ], %g4
721 cmp %g4, %g3
722 bne,pn %xcc, herr_inval
723 nop
724
725 btst CWQ_CW_SIZE - 1, %o1 ! tail offset must be CW-aligned
726 bnz,a,pn %xcc, herr_inval
727 nop
728
729 mov %g1, %o0 ! %o0 = guest struct
730
	/* compute new tail address and bounds-check it against Last */
731 ldx [%g3 + CWQ_QUEUE + CQ_BASE], %g1
732 add %g1, %o1, %g1
733 ldx [%g3 + CWQ_QUEUE + CQ_LAST], %g2
734 cmp %g1, %g2
735 bgu,pn %xcc, herr_inval
736 nop
737
738 CWQ_LOCK_ENTER(%g3, %o5, %g4, %g6)
	/* stash %o1-%o3 so they can be restored at .st_cwq_return */
739 stx %o1, [%g3 + CWQ_QUEUE + CQ_SCR1]
740 stx %o2, [%g3 + CWQ_QUEUE + CQ_SCR2]
741 stx %o3, [%g3 + CWQ_QUEUE + CQ_SCR3]
742 /*
743 * Use the per-cwq assigned cpu as target
744 * for interrupts for this job.
745 */
746 ldx [%g3 + CWQ_QUEUE + CQ_DR_HV_OFFSET], %o2
747 ldx [%g3 + CWQ_QUEUE + CQ_CPU_PID], %o1
748 and %o1, NSTRANDS_PER_CWQ_MASK, %o1 ! hw-thread-id
749 sllx %o1, CW_STRAND_ID_SHIFT, %g4 ! prep for CW control reg
750
	/* a zero hw queue pointer means the queue is not configured */
751 mov ASI_SPU_CWQ_TAIL, %g5
752 ldxa [%g5]ASI_STREAM, %g2
753 brz,a,pn %g2, .st_cwq_return
754 mov EINVAL, %o0
755 mov ASI_SPU_CWQ_FIRST, %g5
756 ldxa [%g5]ASI_STREAM, %g3
757 brz,a,pn %g3, .st_cwq_return
758 mov EINVAL, %o0
759 mov ASI_SPU_CWQ_LAST, %g5
760 ldxa [%g5]ASI_STREAM, %g5
761 brz,a,pn %g5, .st_cwq_return
762 mov EINVAL, %o0
763 !!
764 !! %g1 = New Tail
765 !! %g2 = Current Tail
766 !! %g3 = First
767 !!
768 !! %g4 = hw-thread-id shifted over for CTLBITS.
769 !!
770 mov %g2, %g5
771 mov %g2, %g6
772 /*
773 * %g5 = current CW that we're working on.
774 * %o2 = driver queue - HV queue offset.
775 */
776.st_cwq_trans:
777 cmp %g5, %g1
778 be,a,pn %xcc, .st_cwq_trans_done ! reached new Tail -> done
779 nop
780 add %g5, %o2, %g6
781 ldx [%g6 + CW_CTLBITS], %g6 ! fetch driver's control word bits
782
783 mov CW_SOB_MASK, %g7
784 sllx %g7, CW_SOB_SHIFT, %g7
785 andcc %g7, %g6, %g7 ! start-of-block CW?
786 bz,pn %xcc, .st_cwq_storectl
787 nop
788
789 /*
790 * Fill in the CW_STRAND_ID field with the
791 * physical hwthread-id that we're on.
792 */
793 mov CW_STRAND_ID_MASK, %g7
794 sllx %g7, CW_STRAND_ID_SHIFT, %g7
795 andn %g6, %g7, %g6
796 or %g6, %g4, %g6
797 /*
798 * Force the interrupt bit to be on
799 */
800 mov CW_INTR_MASK, %g7
801 sllx %g7, CW_INTR_SHIFT, %g7
802 or %g6, %g7, %g6
803
804.st_cwq_storectl:
805 stx %g6, [%g5 + CW_CTLBITS] ! write (possibly patched) ctlbits to HV copy
806
807 setx CW_LENGTH_MASK, %g2, %g7
808 and %g6, %g7, %g7 ! %g7 = cw_length
809 add %g7, 1, %g7
810 srlx %g6, CW_HMAC_KEYLEN_SHIFT, %g6
811 and %g6, CW_HMAC_KEYLEN_MASK, %g6 ! %g6 = cw_hmac_keylen
812 add %g6, 1, %g6
813 /*
814 * Source address should never be NULL.
815 */
816 add %g5, %o2, %g2
817 ldx [%g2 + CW_SRC_ADDR], %g2
818 brz,a,pn %g2, .st_cwq_return
819 mov EINVAL, %o0
820 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %g7, .st_cwq_noraddr, %o5, %o1)
821 stx %o1, [%g5 + CW_SRC_ADDR] ! store translated PA into HV copy
822
	/*
	 * The remaining address fields are optional (NULL allowed);
	 * each non-NULL one is translated RA -> PA the same way.
	 */
823 add %g5, %o2, %g2
824 ldx [%g2 + CW_AUTH_KEY_ADDR], %g2
825 brz,pn %g2, .st_cwq_chk_authkey
826 mov %g2, %o1
827 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %g6, .st_cwq_noraddr, %o5, %o1)
828.st_cwq_chk_authkey:
829 stx %o1, [%g5 + CW_AUTH_KEY_ADDR]
830
831 add %g5, %o2, %g2
832 ldx [%g2 + CW_AUTH_IV_ADDR], %g2
833 brz,pn %g2, .st_cwq_chk_authiv
834 mov %g2, %o1
835 mov MAX_IV_LENGTH, %o3
836 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %o3, .st_cwq_noraddr, %o5, %o1)
837.st_cwq_chk_authiv:
838 stx %o1, [%g5 + CW_AUTH_IV_ADDR]
839
840 add %g5, %o2, %g2
841 ldx [%g2 + CW_FINAL_AUTH_STATE_ADDR], %g2
842 brz,pn %g2, .st_cwq_chk_authst
843 mov %g2, %o1
844 mov MAX_AUTHSTATE_LENGTH, %o3
845 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %o3, .st_cwq_noraddr, %o5, %o1)
846.st_cwq_chk_authst:
847 stx %o1, [%g5 + CW_FINAL_AUTH_STATE_ADDR]
848
849 add %g5, %o2, %g2
850 ldx [%g2 + CW_ENC_KEY_ADDR], %g2
851 brz,pn %g2, .st_cwq_chk_key
852 mov %g2, %o1
853 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %g6, .st_cwq_noraddr, %o5, %o1)
854.st_cwq_chk_key:
855 stx %o1, [%g5 + CW_ENC_KEY_ADDR]
856
857 add %g5, %o2, %g2
858 ldx [%g2 + CW_ENC_IV_ADDR], %g2
859 brz,pn %g2, .st_cwq_chk_iv
860 mov %g2, %o1
861 mov MAX_IV_LENGTH, %o3
862 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %o3, .st_cwq_noraddr, %o5, %o1)
863.st_cwq_chk_iv:
864 stx %o1, [%g5 + CW_ENC_IV_ADDR]
865
866 add %g5, %o2, %g2
867 ldx [%g2 + CW_DST_ADDR], %g2
868 brz,pn %g2, .st_cwq_chk_dst
869 mov %g2, %o1
870 RA2PA_RANGE_CONV_UNK_SIZE(%o0, %g2, %g7, .st_cwq_noraddr, %o5, %o1)
871.st_cwq_chk_dst:
872 stx %o1, [%g5 + CW_DST_ADDR]
873
	/* advance to next CW, wrapping Last -> First */
874 mov ASI_SPU_CWQ_LAST, %o1
875 ldxa [%o1]ASI_STREAM, %o1
876 mov %g5, %g6 ! save the last
877 add %g5, CWQ_CW_SIZE, %g5
878 cmp %g5, %o1 ! current == Last?
879 ba,pt %xcc, .st_cwq_trans
880 movgu %xcc, %g3, %g5 ! current = First
881
882.st_cwq_trans_done:
883 mov CW_EOB_MASK, %g5 ! force set the EOB bit on the last
884 sllx %g5, CW_EOB_SHIFT, %g5 ! CWQ submitted
885 ldx [%g6], %g3
886 or %g3, %g5, %g3
887 stx %g3, [%g6]
888 membar #Sync ! CW updates visible before hw Tail moves
889
890 /*
891 * Update our local copy of the Head pointer.
892 * This will ensure that CQ_HEAD is non-zero
893 * for cwq_intr_getstate().
894 */
895 VCPU_STRUCT(%g7)
896 ldx [%g7 + CPU_CWQ], %g3
897 /*
898 * If the cq_head is non-zero then that indicates
899 * it is effectively being managed via sethead,
900 * so we don't want/need to update it here.
901 */
902 ldx [%g3 + CWQ_QUEUE + CQ_HEAD], %g2
903 brnz,pt %g2, .st_cwq_tailonly
904 nop
905 /*
906 * Our first time installing a job on this queue,
907 * so go ahead and initialize cq_head.
908 */
909 mov ASI_SPU_CWQ_HEAD, %g4
910 ldxa [%g4]ASI_STREAM, %g2
911 stx %g2, [%g3 + CWQ_QUEUE + CQ_HEAD]
912
913.st_cwq_tailonly:
914 /*
915 * Update HW's copy of Tail with new Tail.
916 */
917 mov ASI_SPU_CWQ_TAIL, %g5
918 stxa %g1, [%g5]ASI_STREAM ! kicks off hardware processing
919 stx %g1, [%g3 + CWQ_QUEUE + CQ_TAIL]
920 ba,pt %xcc, .st_cwq_return
921 mov EOK, %o0
922
923.st_cwq_noraddr:
924 mov ENORADDR, %o0
925 /*FALLTHROUGH*/
926
927.st_cwq_return:
	/* restore the caller's %o1-%o3 stashed at entry */
928 VCPU_STRUCT(%g7)
929 ldx [%g7 + CPU_CWQ], %g3
930 ldx [%g3 + CWQ_QUEUE + CQ_SCR1], %o1
931 ldx [%g3 + CWQ_QUEUE + CQ_SCR2], %o2
932 ldx [%g3 + CWQ_QUEUE + CQ_SCR3], %o3
933
934 CWQ_LOCK_EXIT(%g3, %o5)
935
936 HCALL_RET(%o0)
937
938 SET_SIZE(ncs_settail_cwq)