Initial commit of OpenSPARC T2 design and verification files.
[OpenSPARC-T2-DV] / verif / diag / assembly / include / c / hcall.s
CommitLineData
86530b38
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* OpenSPARC T2 Processor File: hcall.s
5* Copyright (C) 1995-2007 Sun Microsystems, Inc. All Rights Reserved
6* 4150 Network Circle, Santa Clara, California 95054, U.S.A.
7*
8* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
9*
10* This program is free software; you can redistribute it and/or modify
11* it under the terms of the GNU General Public License as published by
12* the Free Software Foundation; version 2 of the License.
13*
14* This program is distributed in the hope that it will be useful,
15* but WITHOUT ANY WARRANTY; without even the implied warranty of
16* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17* GNU General Public License for more details.
18*
19* You should have received a copy of the GNU General Public License
20* along with this program; if not, write to the Free Software
21* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22*
23* For the avoidance of doubt, and except that if any non-GPL license
24* choice is available it will apply instead, Sun elects to use only
25* the General Public License version 2 (GPLv2) at this time for any
26* software where a choice of GPL license versions is made
27* available with the language indicating that GPLv2 or any later version
28* may be used, or where a choice of which version of the GPL is applied is
29* otherwise unspecified.
30*
31* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
32* CA 95054 USA or visit www.sun.com if you need additional information or
33* have any questions.
34*
35*
36* ========== Copyright Header End ============================================
37*/
/*
 * Diag harness setup: pin the hcall trap handler's text at PA 0x2000 and
 * its data at PA 0x4000, and mark both regions hypervisor-owned via the
 * attr blocks so the testbench assembler/linker places my_hcall_code
 * where the trap vector expects it.
 */
38#define HCALL_TRAP_BASE_PA 0x2000
39#define HCALL_TRAP_DATA_PA 0x4000
40#define ASI_PRIMARY_LITTLE 0x88 /* NOTE(review): not referenced in this chunk -- confirm use before removing */
41SECTION .HCALL_TRAPS TEXT_VA=HCALL_TRAP_BASE_PA, DATA_VA=HCALL_TRAP_DATA_PA
42
43attr_text {
44 Name = .HCALL_TRAPS,
45 hypervisor,
46 }
47attr_data {
48 Name = .HCALL_TRAPS,
49 hypervisor,
50 }
51
52.text
53
54 .ident "@(#)hcall.s 1.72 05/04/29 SMI"
55
56 .file "hcall.s"
57
58/*
59 * Niagara API calls
60 */
61#include <hypervisor.h>
62#define T_BAD_TRAP 1 /* trap number used by the live fallback `ta` in my_hcall_code */
63#if 0 /* everything to the matching #endif below is compiled out of this diag */
64#include <sys/asm_linkage.h>
65#include <sparcv9/misc.h>
66#include <niagara/asi.h>
67#include <niagara/mmu.h>
68#include <niagara/jbi_regs.h>
69#include <niagara/dram.h>
70#include <sun4v/traps.h>
71#include <sun4v/asi.h>
72#include <sun4v/mmu.h>
73#include <sun4v/queue.h>
74#include <devices/pc16550.h>
75#include <sparcv9/asi.h>
76#include <mmustat.h>
77
78#include "config.h"
79#include "cpu.h"
80#include "guest.h"
81#ifdef N2
82#include "offsets-n2.h"
83#else /* N2 */
84#include "offsets-n1.h"
85#endif /* N2 */
86#include "traptrace.h"
87#include "svc.h"
88#include "util.h"
89#include "vdev_ops.h"
90#include "vdev_intr.h"
91#include "debug.h"
92
93#ifdef CONFIG_BRINGUP
94#define VDEV_GENINTR 0x280 /* for testing */
95#endif /* CONFIG_BRINGUP */
96
97
98/*
99 * hcall_core - entry point for the core hcalls: versioning plus
100 * aliases for standard APIs that need to be called when there
101 * exists a version mismatch.
102 *
103 * (disabled here: the whole entry is inside the #if 0 above)
104 */
103 ENTRY_NP(hcall_core)
104 HCALL_RET(EBADTRAP)
105 SET_SIZE(hcall_core)
106#endif
107
108
/*
 * my_hcall_code -- live FAST_TRAP hcall dispatcher for this diag.
 *
 * In:  %o5 = hcall function number (sun4v FAST_TRAP convention).
 * Only three comparisons below are compiled in: CONS_PUTCHAR,
 * VPCI_IO_PEEK and VPCI_IO_POKE.  Every other cmp/branch pair sits
 * inside `#if 0` and is compiled out, so an unmatched function number
 * falls through to `ta T_BAD_TRAP` at the bottom.
 *
 * Note the cmp/be,pn pairs are delay-slot chained: each branch's delay
 * slot holds the cmp for the next case, so the dispatch costs one
 * branch per candidate.
 */
109/*
110 * hcall - entry point for FAST_TRAP hcalls
111 */
112.global my_hcall_code
113my_hcall_code:
114#if 0
115 ENTRY_NP(hcall)
116 cmp %o5, MMU_DEMAP_PAGE
117 be,pn %xcc, hcall_mmu_demap_page
118 cmp %o5, MMU_DEMAP_CTX
119 be,pn %xcc, hcall_mmu_demap_ctx
120 cmp %o5, MMU_DEMAP_ALL
121 be,pn %xcc, hcall_mmu_demap_all
122 cmp %o5, CPU_MONDO_SEND
123 be,pn %xcc, hcall_cpu_mondo_send
124 cmp %o5, IO_PEEK ! io peek, suppress errors
125 be,pn %xcc, hcall_io_peek
126 cmp %o5, IO_POKE ! io poke, suppress errors
127 be,pn %xcc, hcall_io_poke
128
129#endif
130 cmp %o5, CONS_PUTCHAR ! LIVE: console putchar
131 be,pn %xcc, hcall_cons_putchar
132#if 0
133 cmp %o5, CONS_GETCHAR
134 be,pn %xcc, hcall_cons_getchar
135 cmp %o5, TOD_GET
136 be,pn %xcc, hcall_tod_get
137 cmp %o5, TOD_SET
138 be,pn %xcc, hcall_tod_set
139 cmp %o5, MMU_TSB_CTX0
140 be,pn %xcc, hcall_mmu_tsb_ctx0
141 cmp %o5, MMU_TSB_CTXNON0
142 be,pn %xcc, hcall_mmu_tsb_ctxnon0
143 cmp %o5, MMU_MAP_PERM_ADDR
144 be,pn %xcc, hcall_mmu_map_perm_addr
145 cmp %o5, MMU_UNMAP_PERM_ADDR
146 be,pn %xcc, hcall_mmu_unmap_perm_addr
147 cmp %o5, MMU_FAULT_AREA_CONF
148 be,pn %xcc, hcall_mmu_fault_area_conf
149 cmp %o5, MEM_SCRUB
150 be,pn %xcc, hcall_mem_scrub
151 cmp %o5, MEM_SYNC
152 be,pn %xcc, hcall_mem_sync
153#if defined(NVRAM_READ) && defined(NVRAM_WRITE)
154 cmp %o5, NVRAM_READ
155 be,pn %xcc, hcall_nvram_read
156 cmp %o5, NVRAM_WRITE
157 be,pn %xcc, hcall_nvram_write
158#endif
159#ifdef CONFIG_SVC
160 cmp %o5, SVC_SEND
161 be,pn %xcc, hcall_svc_send
162 cmp %o5, SVC_RECV
163 be,pn %xcc, hcall_svc_recv
164 cmp %o5, SVC_GETSTATUS
165 be,pn %xcc, hcall_svc_getstatus
166 cmp %o5, SVC_SETSTATUS
167 be,pn %xcc, hcall_svc_setstatus
168 cmp %o5, SVC_CLRSTATUS
169 be,pn %xcc, hcall_svc_clrstatus
170#endif
171 cmp %o5, CPU_QINFO
172 be,pn %xcc, hcall_qinfo
173 cmp %o5, CPU_QCONF
174 be,pn %xcc, hcall_qconf
175 cmp %o5, CPU_START
176 be,pn %xcc, hcall_cpu_start
177 cmp %o5, CPU_STOP
178 be,pn %xcc, hcall_cpu_stop
179 cmp %o5, CPU_STATE
180 be,pn %xcc, hcall_cpu_state
181 cmp %o5, CPU_YIELD
182 be,pn %xcc, hcall_cpu_yield
183 cmp %o5, MACH_SIR
184 be,pn %xcc, hcall_mach_sir
185 cmp %o5, MACH_EXIT
186 be,pn %xcc, hcall_mach_exit
187 cmp %o5, CPU_MYID
188 be,pn %xcc, hcall_cpu_myid
189 cmp %o5, MMU_ENABLE
190 be,pn %xcc, hcall_mmu_enable
191 cmp %o5, MMU_TSB_CTX0_INFO
192 be,pn %xcc, hcall_mmu_tsb_ctx0_info
193 cmp %o5, MMU_TSB_CTXNON0_INFO
194 be,pn %xcc, hcall_mmu_tsb_ctxnon0_info
195 cmp %o5, NIAGARA_GET_PERFREG
196 be,pn %xcc, hcall_niagara_getperf
197 cmp %o5, NIAGARA_SET_PERFREG
198 be,pn %xcc, hcall_niagara_setperf
199 cmp %o5, DIAG_RA2PA
200 be,pn %xcc, hcall_ra2pa
201 cmp %o5, DIAG_HEXEC
202 be,pn %xcc, hcall_hexec
203 cmp %o5, MACH_DESC
204 be,pn %xcc, hcall_mach_desc
205 cmp %o5, DUMP_BUF_INFO
206 be,pn %xcc, hcall_dump_buf_info
207 cmp %o5, DUMP_BUF_UPDATE
208 be,pn %xcc, hcall_dump_buf_update
209 cmp %o5, INTR_DEVINO2SYSINO
210 be,pn %xcc, hcall_intr_devino2sysino
211 cmp %o5, INTR_GETENABLED
212 be,pn %xcc, hcall_intr_getenabled
213 cmp %o5, INTR_SETENABLED
214 be,pn %xcc, hcall_intr_setenabled
215 cmp %o5, INTR_GETSTATE
216 be,pn %xcc, hcall_intr_getstate
217 cmp %o5, INTR_SETSTATE
218 be,pn %xcc, hcall_intr_setstate
219 cmp %o5, INTR_GETTARGET
220 be,pn %xcc, hcall_intr_gettarget
221 cmp %o5, INTR_SETTARGET
222 be,pn %xcc, hcall_intr_settarget
223#ifndef N2
224 cmp %o5, VPCI_IOMMU_MAP
225 be,pn %xcc, hcall_vpci_iommu_map
226 cmp %o5, VPCI_IOMMU_UNMAP
227 be,pn %xcc, hcall_vpci_iommu_unmap
228 cmp %o5, VPCI_IOMMU_GETMAP
229 be,pn %xcc, hcall_vpci_iommu_getmap
230 cmp %o5, VPCI_IOMMU_GETBYPASS
231 be,pn %xcc, hcall_vpci_iommu_getbypass
232 cmp %o5, VPCI_CONFIG_GET
233 be,pn %xcc, hcall_vpci_config_get
234 cmp %o5, VPCI_CONFIG_PUT
235 be,pn %xcc, hcall_vpci_config_put
236#endif
237#endif
238 cmp %o5, VPCI_IO_PEEK ! LIVE: vpci io peek
239 be,pn %xcc, hcall_vpci_io_peek
240 cmp %o5, VPCI_IO_POKE ! LIVE: vpci io poke
241 be,pn %xcc, hcall_vpci_io_poke
242#if 0
243#ifndef N2
244 cmp %o5, VPCI_DMA_SYNC
245 be,pn %xcc, hcall_vpci_dma_sync
246 cmp %o5, MSIQ_CONF
247 be,pn %xcc, hcall_msiq_conf
248 cmp %o5, MSIQ_INFO
249 be,pn %xcc, hcall_msiq_info
250 cmp %o5, MSIQ_GETVALID
251 be,pn %xcc, hcall_msiq_getvalid
252 cmp %o5, MSIQ_SETVALID
253 be,pn %xcc, hcall_msiq_setvalid
254 cmp %o5, MSIQ_GETSTATE
255 be,pn %xcc, hcall_msiq_getstate
256 cmp %o5, MSIQ_SETSTATE
257 be,pn %xcc, hcall_msiq_setstate
258 cmp %o5, MSIQ_GETHEAD
259 be,pn %xcc, hcall_msiq_gethead
260 cmp %o5, MSIQ_SETHEAD
261 be,pn %xcc, hcall_msiq_sethead
262 cmp %o5, MSIQ_GETTAIL
263 be,pn %xcc, hcall_msiq_gettail
264 cmp %o5, MSI_GETVALID
265 be,pn %xcc, hcall_msi_getvalid
266 cmp %o5, MSI_SETVALID
267 be,pn %xcc, hcall_msi_setvalid
268 cmp %o5, MSI_GETSTATE
269 be,pn %xcc, hcall_msi_getstate
270 cmp %o5, MSI_SETSTATE
271 be,pn %xcc, hcall_msi_setstate
272 cmp %o5, MSI_GETMSIQ
273 be,pn %xcc, hcall_msi_getmsiq
274 cmp %o5, MSI_SETMSIQ
275 be,pn %xcc, hcall_msi_setmsiq
276 cmp %o5, MSI_MSG_GETVALID
277 be,pn %xcc, hcall_msi_msg_getvalid
278 cmp %o5, MSI_MSG_SETVALID
279 be,pn %xcc, hcall_msi_msg_setvalid
280 cmp %o5, MSI_MSG_GETMSIQ
281 be,pn %xcc, hcall_msi_msg_getmsiq
282 cmp %o5, MSI_MSG_SETMSIQ
283 be,pn %xcc, hcall_msi_msg_setmsiq
284#endif /* !N2 */
285#ifdef CONFIG_DISK
286 cmp %o5, DISK_READ
287 be,pn %xcc, hcall_disk_read
288 cmp %o5, DISK_WRITE
289 be,pn %xcc, hcall_disk_write
290#endif
291#ifndef N2
292 cmp %o5, NCS_REQUEST
293 be,pn %xcc, hcall_ncs_request
294#endif /* !N2 */
295#ifdef MMU_STATS
296 cmp %o5, MMU_STAT_AREA
297 be,pn %xcc, hcall_mmu_stat_area
298#endif /* MMU_STATS */
299 cmp %o5, TTRACE_BUF_CONF
300 be,pn %xcc, hcall_ttrace_buf_conf
301 cmp %o5, TTRACE_BUF_INFO
302 be,pn %xcc, hcall_ttrace_buf_info
303 cmp %o5, TTRACE_ENABLE
304 be,pn %xcc, hcall_ttrace_enable
305 cmp %o5, TTRACE_FREEZE
306 be,pn %xcc, hcall_ttrace_freeze
307 cmp %o5, MMU_FAULT_AREA_INFO
308 be,pn %xcc, hcall_mmu_fault_area_info
309 cmp %o5, CPU_GET_RTBA
310 be,pn %xcc, hcall_get_rtba
311 cmp %o5, CPU_SET_RTBA
312 be,pn %xcc, hcall_set_rtba
313#ifdef CONFIG_BRINGUP
314 cmp %o5, VDEV_GENINTR
315 be,pn %xcc, hcall_vdev_genintr
316#endif
317 nop
318 HCALL_RET(EBADTRAP)
319#endif
320 nop ! LIVE: delay slot of the VPCI_IO_POKE branch above
321 ta T_BAD_TRAP ! LIVE: unmatched hcall number -> bad-trap handler
322 nop
323
323
/*
 * Common hcall return/error stubs (DISABLED: inside `#if 0`).
 * Handlers branch here with `ba herr_*`, typically carrying a cleanup
 * store in the branch delay slot; each stub returns the corresponding
 * sun4v error code to the guest via HCALL_RET.
 */
324#if 0
325 SET_SIZE(hcall)
326
327
328/*
329 * Common error escapes so errors can be implemented by
330 * cmp, branch.
331 */
332 ENTRY(hret_ok)
333 HCALL_RET(EOK)
334 SET_SIZE(hret_ok)
335
336 ENTRY(herr_nocpu)
337 HCALL_RET(ENOCPU)
338 SET_SIZE(herr_nocpu)
339
340 ENTRY(herr_noraddr)
341 HCALL_RET(ENORADDR)
342 SET_SIZE(herr_noraddr)
343
344 ENTRY(herr_nointr)
345 HCALL_RET(ENOINTR)
346 SET_SIZE(herr_nointr)
347
348 ENTRY(herr_badpgsz)
349 HCALL_RET(EBADPGSZ)
350 SET_SIZE(herr_badpgsz)
351
352 ENTRY(herr_badtsb)
353 HCALL_RET(EBADTSB)
354 SET_SIZE(herr_badtsb)
355
356 ENTRY(herr_inval)
357 HCALL_RET(EINVAL)
358 SET_SIZE(herr_inval)
359
360 ENTRY(herr_badtrap)
361 HCALL_RET(EBADTRAP)
362 SET_SIZE(herr_badtrap)
363
364 ENTRY(herr_badalign)
365 HCALL_RET(EBADALIGN)
366 SET_SIZE(herr_badalign)
367
368 ENTRY(herr_wouldblock)
369 HCALL_RET(EWOULDBLOCK)
370 SET_SIZE(herr_wouldblock)
371
372 ENTRY(herr_noaccess)
373 HCALL_RET(ENOACCESS)
374 SET_SIZE(herr_noaccess)
375
376 ENTRY(herr_ioerror)
377 HCALL_RET(EIO)
378 SET_SIZE(herr_ioerror)
379
380 ENTRY(herr_toomany)
381 HCALL_RET(ETOOMANY)
382 SET_SIZE(herr_toomany)
383
384 ENTRY(herr_nomap)
385 HCALL_RET(ENOMAP)
386 SET_SIZE(herr_nomap)
387
388 ENTRY(herr_notsupported)
389 HCALL_RET(ENOTSUPPORTED)
390 SET_SIZE(herr_notsupported)
391
392
/*
 * hcall_io_peek (DISABLED: inside the #if 0 region).
 * Protected i/o read: sets cpu.io_prot so a UE during the access is
 * suppressed and latched in cpu.io_error instead of taking the cpu down.
 * Note the error paths branch to herr_* with the flag-clearing store
 * placed in the branch delay slot.
 */
393#ifdef CONFIG_IO_PEEK_POKE
394/*
395 * Function: hcall_io_peek(void *ioaddr, int bw)
396 * Input:
397 * %o5 - hcall function number
398 * %o0 - i/o addr
399 * %o1 - read byte width
400 * (1 = byte, 2 = halfword, 4 = word, 8 = double
401 * Output:
402 * %o0 - EOK (for success), EINVAL or EIO (for failure)
403 * %o1 - i/o data on successful read
404 */
405 ENTRY_NP(hcall_io_peek)
406 CPU_STRUCT(%g1)
407
408 ! set io_prot flag
409 set 1, %g2
410 set CPU_IO_PROT, %g3
411 stx %g2, [%g1 + %g3] ! cpu.io_prot = 1
412
413 ! check for valid args; each bz's delay slot holds the next cmp
414 cmp %o1, 1
415 bz .byte_read
416 cmp %o1, 2
417 bz .halfword_read
418 cmp %o1, 4
419 bz .word_read
420 cmp %o1, 8
421 bz .extword_read
422 nop
423 ! clear io_prot, return EINVAL
424 ba herr_inval
425 stx %g0, [%g1 + %g3] ! cpu.io_prot = 0 (delay slot)
426
427 ! %g1 has the cpu pointer
428.byte_read:
429 ldub [%o0], %g2
430 ba,a 1f
431.halfword_read:
432 lduh [%o0], %g2
433 ba,a 1f
434.word_read:
435 ld [%o0], %g2
436 ba,a 1f
437.extword_read:
438 ldx [%o0], %g2
439 /*
440 * check io_error flag which will be nonzero if a UE occurred
441 * %g1 has this_cpu, %g2 has read return data
442 * %g3 is cpu.io_prot offset
443 */
4441:
445 set CPU_IO_ERROR, %g4
446 ldx [%g1 + %g4], %g5 ! cpu.io_error
447 brnz %g5, 2f ! i/o error
448 stx %g0, [%g1 + %g3] ! cpu.io_prot = 0 (delay slot, both paths)
449 ! no i/o error
450 mov %g2, %o1 ! return data in %o1
451 HCALL_RET(EOK)
452
453 ! i/o error, clear io_error flag
4542:
455 ba herr_ioerror
456 stx %g0, [%g1 + %g4] ! cpu.io_error = 0 (delay slot)
457 SET_SIZE(hcall_io_peek)
458
/*
 * hcall_io_poke (DISABLED: inside the #if 0 region).
 * Protected i/o write: mirror of hcall_io_peek for stores.  Sets
 * cpu.io_prot around the access and reports a latched UE as EIO.
 */
459/*
460 * Function: hcall_io_poke(void *addr, uint64_t data, int size)
461 * Arguments:
462 * Input:
463 * %o5 - hcall function number
464 * %o0 - i/o address
465 * %o1 - write data
466 * %o2 - write byte width
467 * (1 = byte, 2 = halfword, 4 = word, 8 = extword)
468 * Output:
469 * %o0 - EOK (on success), EINVAL or EIO (on failure)
470 */
471 ENTRY_NP(hcall_io_poke)
472 CPU_STRUCT(%g1)
473
474 ! set io_prot flag
475 set 1, %g2
476 set CPU_IO_PROT, %g3
477 stx %g2, [%g1 + %g3] ! cpu.io_prot = 1
478
479 ! check for valid args; each bz's delay slot holds the next cmp
480 cmp %o2, 1
481 bz .byte_write
482 cmp %o2, 2
483 bz .halfword_write
484 cmp %o2, 4
485 bz .word_write
486 cmp %o2, 8
487 bz .extword_write
488 nop
489 ! clear io_prot, return EINVAL
490 ba herr_inval
491 stx %g0, [%g1 + %g3] ! cpu.io_prot = 0 (delay slot)
492
493 ! %g1 has the cpu pointer
494.byte_write:
495 stb %o1, [%o0]
496 ba,a 1f
497.halfword_write:
498 sth %o1, [%o0]
499 ba,a 1f
500.word_write:
501 st %o1, [%o0]
502 ba,a 1f
503.extword_write:
504 stx %o1, [%o0]
505 /*
506 * check io_error flag which will be nonzero if a UE occurred
507 * %g1 has this_cpu (write path: no return data)
508 * %g3 is cpu.io_prot offset
509 */
5101:
511 set CPU_IO_ERROR, %g4
512 ldx [%g1 + %g4], %g5
513 brnz %g5, 2f ! i/o error
514 stx %g0, [%g1 + %g3] ! cpu.io_prot = 0 (delay slot, both paths)
515 HCALL_RET(EOK)
516
517 ! i/o error, clear io_error flag
5182:
519 ba herr_ioerror
520 stx %g0, [%g1 + %g4] ! cpu.io_error = 0 (delay slot)
521 SET_SIZE(hcall_io_poke)
522#endif
523
524
/*
 * hcall_mach_exit (DISABLED: inside the #if 0 region).
 * Guest exit: hands off to vbsc or the Legion simulator exit hook;
 * the trailing HCALL_RET(EBADTRAP) only runs if neither path leaves.
 */
525/*
526 * mach_exit
527 *
528 * arg0 exit code (%o0)
529 * --
530 * does not return
531 */
532 ENTRY_NP(hcall_mach_exit)
533 /*
534 * - quiesce all other cpus in guest
535 * - re-initialize guest
536 * - go back to start so boot cpu (maybe not this cpu)
537 * can reboot the guest or wait for further instructions
538 * from the Higher One
539 */
540#ifdef CONFIG_VBSC_SVC
541 ba,pt %xcc, vbsc_guest_exit
542 nop
543#else
544 LEGION_EXIT(%o0) ! exit simulation with the guest's exit code
545#endif
546 HCALL_RET(EBADTRAP) ! not reached unless the exit path returns
547 SET_SIZE(hcall_mach_exit)
548
549
/*
 * hcall_mach_sir (DISABLED: inside the #if 0 region).
 * Software-initiated reset: like mach_exit but always exits with
 * code 0 (or hands off to vbsc_guest_sir).
 */
550/*
551 * mach_sir
552 *
553 * --
554 * does not return
555 */
556 ENTRY_NP(hcall_mach_sir)
557 /*
558 * - quiesce all other cpus in guest
559 * - re-initialize guest
560 * - go back to start so boot cpu (maybe not this cpu)
561 * can reboot the guest or wait for further instructions
562 * from the Higher One
563 */
564#ifdef CONFIG_VBSC_SVC
565 ba,pt %xcc, vbsc_guest_sir
566 nop
567#else
568 LEGION_EXIT(0)
569#endif
570 HCALL_RET(EBADTRAP) ! not reached unless the reset path returns
571 SET_SIZE(hcall_mach_sir)
572
573
/*
 * hcall_mach_desc (DISABLED: inside the #if 0 region).
 * Copies the guest's machine description into a guest real-address
 * buffer.  Always returns the (aligned-up) PD size in %o1 so the
 * two-phase probe-then-copy protocol below works.
 */
574/*
575 * mach_desc
576 *
577 * arg0 buffer (%o0)
578 * arg1 len (%o1)
579 * --
580 * ret0 status (%o0)
581 * ret1 actual len (%o1) (for EOK or EINVAL)
582 *
583 * guest uses this sequence to get the machine description:
584 * mach_desc(0, 0)
585 * if %o0 != EINVAL, failed
586 * len = %o1
587 * buf = allocate(len)
588 * mach_desc(buf, len)
589 * if %o0 != EOK, failed
590 * so the EINVAL case is the first error check
591 */
592 ENTRY_NP(hcall_mach_desc)
593 CPU_GUEST_STRUCT(%g1, %g6)
594 set GUEST_PD_SIZE, %g7
595 ldx [%g6 + %g7], %g3
596 ! paranoia for xcopy - should already be 16byte multiple
597 add %g3, MACH_DESC_ALIGNMENT - 1, %g3
598 andn %g3, MACH_DESC_ALIGNMENT - 1, %g3 ! round PD size up to alignment
599 cmp %g3, %o1
600 bgu,pn %xcc, herr_inval ! buffer too small
601 mov %g3, %o1 ! return PD size for success or EINVAL
602
603 btst MACH_DESC_ALIGNMENT - 1, %o0
604 bnz,pn %xcc, herr_badalign
605 .empty /* RANGE_CHECK may start in a delay slot */
606
607 RANGE_CHECK(%g6, %o0, %g3, herr_noraddr, %g4)
608 REAL_OFFSET(%g6, %o0, %g4, %g5)
609 ! %g3 = size of pd
610 ! %g4 = pa of guest buffer
611 /* xcopy(pd, buf[%o0], size[%g3]) */
612 set GUEST_PD_PA, %g7
613 ldx [%g6 + %g7], %g1
614 mov %g4, %g2
615 ba xcopy
616 rd %pc, %g7 ! linkage for xcopy's return -- NOTE(review): assumes xcopy returns to %g7-relative address per util.s convention; confirm
617
618 ! %o1 was set above to the guest's PD size
619 HCALL_RET(EOK)
620 SET_SIZE(hcall_mach_desc)
621
622
/*
 * hcall_tod_get (DISABLED: inside the #if 0 region).
 * Returns the guest's time-of-day in seconds: system tod counter,
 * divided by the tod frequency, plus the per-guest tod offset.
 */
623/*
624 * tod_get - Time-of-day get
625 *
626 * no arguments
627 * --
628 * ret0 status (%o0)
629 * ret1 tod (%o1)
630 */
631 ENTRY_NP(hcall_tod_get)
632 CPU_STRUCT(%g1)
633 CPU2ROOT_STRUCT(%g1, %g2)
634 CPU2GUEST_STRUCT(%g1, %g1)
635 !! %g1 guestp
636 !! %g2 configp
637 ldx [%g1 + GUEST_TOD_OFFSET], %g3
638 ldx [%g2 + CONFIG_TOD], %g4
639 ldx [%g2 + CONFIG_TODFREQUENCY], %g5
640 !! %g3 guest's tod offset
641 !! %g4 tod (pointer; zero means no hardware TOD)
642 !! %g5 tod frequency
643#ifdef CONFIG_STATICTOD
644 ! If the PD says no TOD then start with 0
645 brz,pn %g4, hret_ok
646 clr %o1 ! delay slot: report tod = 0
647#else
648 brz,pn %g4, herr_notsupported
649 clr %o1 ! In case error status not checked
650#endif
651
652 ldx [%g4], %o1
653 udivx %o1, %g5, %o1 ! Convert to seconds
654 add %o1, %g3, %o1 ! Add partition's tod offset
655 HCALL_RET(EOK)
656 SET_SIZE(hcall_tod_get)
657
/*
 * hcall_tod_set (DISABLED: inside the #if 0 region).
 * Sets the guest's time-of-day by recording a per-guest delta from
 * the system tod; the hardware counter itself is never written.
 */
658/*
659 * tod_set - Time-of-day set
660 *
661 * arg0 tod (%o0)
662 * --
663 * ret0 status (%o0)
664 */
665 ENTRY_NP(hcall_tod_set)
666 CPU_STRUCT(%g1)
667 CPU2ROOT_STRUCT(%g1, %g2)
668 CPU2GUEST_STRUCT(%g1, %g1)
669 !! %g1 guestp
670 !! %g2 configp
671 ldx [%g1 + GUEST_TOD_OFFSET], %g3
672 ldx [%g2 + CONFIG_TOD], %g4
673 ldx [%g2 + CONFIG_TODFREQUENCY], %g5
674 !! %g3 guest's tod offset
675 !! %g4 tod (pointer; zero means no hardware TOD)
676 !! %g5 tod frequency
677
678#ifdef CONFIG_STATICTOD
679 /*
680 * If no hardware TOD then tod-get returned 0 the first time
681 * and will continue to do so.
682 */
683 brz,pn %g4, hret_ok
684 nop
685#else
686 brz,pn %g4, herr_notsupported
687 nop
688#endif
689
690 ldx [%g4], %g6 ! %g6 = system tod
691 udivx %g6, %g5, %g6 ! Convert to seconds
692 sub %o0, %g6, %g6 ! %g6 = new delta (requested tod - system tod)
693 stx %g6, [%g1 + GUEST_TOD_OFFSET]
694 HCALL_RET(EOK)
695 SET_SIZE(hcall_tod_set)
696
697
/*
 * hcall_mmu_enable (DISABLED: inside the #if 0 region).
 * Toggles the MMU via the LSU control register and resumes the guest
 * at %o1.  Enabling checks only alignment (the VA can't be range
 * checked); disabling additionally range-checks the RA.
 */
698/*
699 * mmu_enable
700 *
701 * arg0 enable (%o0)
702 * arg1 return address (%o1)
703 * --
704 * ret0 status (%o0)
705 */
706 ENTRY_NP(hcall_mmu_enable)
707 /*
708 * Check requested return address for instruction
709 * alignment
710 */
711 btst (INSTRUCTION_ALIGNMENT - 1), %o1
712 bnz,pn %xcc, herr_badalign
713 nop
714
715 ldxa [%g0]ASI_LSUCR, %g1
716 set (LSUCR_DM | LSUCR_IM), %g2 ! data-MMU | instr-MMU enable bits
717 !! %g1 = current lsucr value
718 !! %g2 = mmu enable mask
719
720 brz,pn %o0, 1f ! enable or disable?
721 btst %g1, %g2 ! ccr indicates current status (evaluated in delay slot, used on both paths)
722
723 /*
724 * Trying to enable
725 *
726 * The return address will be virtual and we cannot
727 * check its range, the alignment has already been
728 * checked.
729 */
730 bnz,pn %xcc, herr_inval ! it's already enabled
731 or %g1, %g2, %g1 ! enable MMU
732
733 ba,pt %xcc, 2f
734 nop
735
7361:
737 /*
738 * Trying to disable
739 *
740 * The return address is a real address so we check
741 * its range, the alignment has already been checked.
742 */
743 bz,pn %xcc, herr_inval ! it's already disabled
744 andn %g1, %g2, %g1 ! disable MMU
745
746 /* Check RA range */
747 GUEST_STRUCT(%g3)
748 RANGE_CHECK(%g3, %o1, INSTRUCTION_SIZE, herr_noraddr, %g4)
749
7502:
751 wrpr %o1, %tnpc ! resume guest at requested address on return
752 stxa %g1, [%g0]ASI_LSUCR
753 HCALL_RET(EOK)
754 SET_SIZE(hcall_mmu_enable)
755
756
/*
 * hcall_mmu_fault_area_conf (DISABLED: inside the #if 0 region).
 * Registers (or with %o0 == 0, clears) the per-cpu MMU fault status
 * area; stores both the guest RA and the translated PA, and returns
 * the previous RA in %o1.
 */
757/*
758 * mmu_fault_area_conf
759 *
760 * arg0 raddr (%o0)
761 * --
762 * ret0 status (%o0)
763 * ret1 oldraddr (%o1)
764 */
765 ENTRY_NP(hcall_mmu_fault_area_conf)
766 btst (MMU_FAULT_AREA_ALIGNMENT - 1), %o0 ! check alignment
767 bnz,pn %xcc, herr_badalign
768 CPU_GUEST_STRUCT(%g1, %g4)
769 brz,a,pn %o0, 1f ! raddr 0 = unconfigure; annulled delay slot zeroes the PA
770 mov 0, %g2
771 RANGE_CHECK(%g4, %o0, MMU_FAULT_AREA_SIZE, herr_noraddr, %g3)
772 REAL_OFFSET(%g4, %o0, %g2, %g3) ! %g2 = PA of fault area
7731:
774 ldx [%g1 + CPU_MMU_AREA_RA], %o1 ! return previous RA
775 stx %o0, [%g1 + CPU_MMU_AREA_RA]
776 stx %g2, [%g1 + CPU_MMU_AREA]
777
778 HCALL_RET(EOK)
779 SET_SIZE(hcall_mmu_fault_area_conf)
780
/*
 * hcall_mmu_fault_area_info (DISABLED: inside the #if 0 region).
 * Read-only query: returns the currently configured fault area RA.
 */
781/*
782 * mmu_fault_area_info
783 *
784 * --
785 * ret0 status (%o0)
786 * ret1 fault area raddr (%o1)
787 */
788 ENTRY_NP(hcall_mmu_fault_area_info)
789 CPU_STRUCT(%g1)
790 ldx [%g1 + CPU_MMU_AREA_RA], %o1
791 HCALL_RET(EOK)
792 SET_SIZE(hcall_mmu_fault_area_info)
793
/*
 * hcall_mmu_tsb_ctx0 (DISABLED: inside the #if 0 region).
 * Installs the guest's context-0 TSB descriptors:
 *   1. zero cpu->ntsbs and the H/W TSB base regs (safe error exit),
 *   2. xcopy the TSBD array into cpu->tsbds,
 *   3. validate every TSBD (page size, 1-way assoc, power-of-two size
 *      within limits, ctx index 0 or shared, reserved zero, RA range
 *      and base alignment),
 *   4. program the hardware: N2 HWTW config regs, or N1 TSB base /
 *      config ASIs (first two TSBDs only),
 *   5. record ntsbs.
 */
794/*
795 * mmu_tsb_ctx0
796 *
797 * arg0 ntsb (%o0)
798 * arg1 tsbs (%o1)
799 * --
800 * ret0 status (%o0)
801 */
802 ENTRY_NP(hcall_mmu_tsb_ctx0)
803 CPU_GUEST_STRUCT(%g5, %g6)
804 INC_MMU_STAT(%g5, MMUSTAT_SET0, %g2, %g3)
805 /* set cpu->ntsbs to zero now in case we error exit */
806 stx %g0, [%g5 + CPU_NTSBS_CTX0]
807 /* Also zero out H/W bases */
808 ba set_dummytsb_ctx0
809 rd %pc, %g7 ! linkage for callee return (ba/rd %pc call idiom)
810 brz,pn %o0, setntsbs0 ! ntsb == 0: just leave TSBs unconfigured
811 cmp %o0, MAX_NTSB
812 bgu,pn %xcc, herr_inval
813 btst TSBD_ALIGNMENT - 1, %o1
814 bnz,pn %xcc, herr_badalign
815 sllx %o0, TSBD_SHIFT, %g3 ! %g3 = ntsb * TSBD_BYTES
816 RANGE_CHECK(%g6, %o1, %g3, herr_noraddr, %g2)
817 /* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
818 REAL_OFFSET(%g6, %o1, %g1, %g2)
819 add %g5, CPU_TSBDS_CTX0, %g2
820 ! xcopy trashes g1-4
821 ba xcopy
822 rd %pc, %g7
823 /* loop over each TSBD and validate */
824 mov %o0, %g1 ! %g1 = TSBDs remaining
825 add %g5, CPU_TSBDS_CTX0, %g2 ! %g2 = current TSBD
8261:
827 /* check pagesize - accept any size encoding? XXX */
828 /* XXX pageszidx is lowest-order bit of pageszmask */
829 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
830 cmp %g3, NPGSZ
831 bgeu,pn %xcc, herr_badpgsz
832 nop
833 /* check associativity - only support 1-way */
834 lduh [%g2 + TSBD_ASSOC_OFF], %g3
835 cmp %g3, 1
836 bne,pn %icc, herr_badtsb
837 nop
838 /* check TSB size */
839 ld [%g2 + TSBD_SIZE_OFF], %g3
840 sub %g3, 1, %g4
841 btst %g3, %g4 ! power-of-two check: n & (n-1)
842 bnz,pn %icc, herr_badtsb
843 mov TSB_SZ0_ENTRIES, %g4
844 cmp %g3, %g4
845 blt,pn %icc, herr_badtsb ! below minimum entry count
846 sll %g4, TSB_MAX_SZCODE, %g4
847 cmp %g3, %g4
848 bgt,pn %icc, herr_badtsb ! above maximum entry count
849 nop
850 /* check context index field - must be -1 (shared) or zero */
851 ld [%g2 + TSBD_CTX_INDEX], %g3
852 cmp %g3, TSBD_CTX_IDX_SHARE
853 be %icc, 2f ! -1 is OK
854 nop
855 brnz,pn %g3, herr_inval ! only one set of context regs
856 nop
8572:
858 /* check reserved field - must be zero for now */
859 ldx [%g2 + TSBD_RSVD_OFF], %g3
860 brnz,pn %g3, herr_inval
861 nop
862 /* check TSB base real address */
863 ldx [%g2 + TSBD_BASE_OFF], %g3
864 ld [%g2 + TSBD_SIZE_OFF], %g4
865 sllx %g4, TSBE_SHIFT, %g4 ! %g4 = TSB size in bytes
866 RANGE_CHECK(%g6, %g3, %g4, herr_noraddr, %g7)
867 /* range OK, check alignment */
868 sub %g4, 1, %g4
869 btst %g3, %g4 ! base must be aligned to its own size
870 bnz,pn %xcc, herr_badalign
871 sub %g1, 1, %g1
872 brnz,pt %g1, 1b ! next TSBD
873 add %g2, TSBD_BYTES, %g2
874
875#ifdef N2
876 /* now setup HWTW regs */
877 /* XXX - only look at first two TSBDs for now */
878 /* XXX - setup use_context if TSBD context not shared or zero */
879
880 /* process first TSBD */
881 add %g5, CPU_TSBDS_CTX0, %g2
882 ldx [%g2 + TSBD_BASE_OFF], %g1
883 REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
884
885 ld [%g2 + TSBD_SIZE_OFF], %g4
886 srl %g4, TSB_SZ0_SHIFT, %g4
8871: ! count log2(entries/SZ0) into low bits of %g1
888 btst 1, %g4
889 srl %g4, 1, %g4
890 bz,a,pt %icc, 1b
891 add %g1, 1, %g1 ! increment TSB size field
892
893 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
894 sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
895 or %g1, %g4, %g1 ! add page size field
896 or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
897 mov 1, %g4
898 sllx %g4, 63, %g4
899 or %g1, %g4, %g1 ! add valid bit
900
901 mov TSB_CFG_CTX0_0, %g4
902 stxa %g1, [%g4]ASI_MMU_TSB ! program HWTW config reg 0
903
904 /* process second TSBD, if available */
905 cmp %o0, 1
906 be,pt %xcc, 2f ! only one TSBD supplied
907 add %g2, TSBD_BYTES, %g2 ! move to next TSBD
908 ldx [%g2 + TSBD_BASE_OFF], %g1
909 REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
910 ld [%g2 + TSBD_SIZE_OFF], %g4
911 srl %g4, TSB_SZ0_SHIFT, %g4
9121:
913 btst 1, %g4
914 srl %g4, 1, %g4
915 bz,a,pt %icc, 1b
916 add %g1, 1, %g1 ! increment TSB size field
917
918 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
919 sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
920 or %g1, %g4, %g1 ! add page size field
921 or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
922 mov 1, %g4
923 sllx %g4, 63, %g4
924 or %g1, %g4, %g1 ! add valid bit
925
926 mov TSB_CFG_CTX0_1, %g4
927 stxa %g1, [%g4]ASI_MMU_TSB ! program HWTW config reg 1
9282:
929 stx %o0, [%g5 + CPU_NTSBS_CTX0]
930#else /* N2 */
931 /* now setup H/W TSB regs */
932 /* only look at first two TSBDs for now */
933 add %g5, CPU_TSBDS_CTX0, %g2
934 ldx [%g2 + TSBD_BASE_OFF], %g1
935 REAL_OFFSET(%g6, %g1, %g1, %g4)
936 ld [%g2 + TSBD_SIZE_OFF], %g4
937 srl %g4, TSB_SZ0_SHIFT, %g4
9381:
939 btst 1, %g4
940 srl %g4, 1, %g4
941 bz,a,pt %icc, 1b
942 add %g1, 1, %g1 ! increment TSB size field
943
944 stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS0
945 stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS0
946
947 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
948 stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 only)
949 stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 only)
950
951 /* process second TSBD, if available */
952 cmp %o0, 1
953 be,pt %xcc, 2f
954 add %g2, TSBD_BYTES, %g2 ! move to next TSBD
955 ldx [%g2 + TSBD_BASE_OFF], %g1
956 REAL_OFFSET(%g6, %g1, %g1, %g4)
957 ld [%g2 + TSBD_SIZE_OFF], %g4
958 srl %g4, TSB_SZ0_SHIFT, %g4
9591:
960 btst 1, %g4
961 srl %g4, 1, %g4
962 bz,a,pt %icc, 1b
963 add %g1, 1, %g1 ! increment TSB size field
964
965 stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS1
966 stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS1
967
968 /* %g3 still has old CONFIG value. */
969 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
970 sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
971 or %g3, %g7, %g3
972 stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 + PS1)
973 stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 + PS1)
974
9752:
976 stx %o0, [%g5 + CPU_NTSBS_CTX0]
977#endif /* N2 */
978setntsbs0:
979 clr %o1 ! no return value
980 HCALL_RET(EOK)
981 SET_SIZE(hcall_mmu_tsb_ctx0)
982
983
/*
 * hcall_mmu_tsb_ctxnon0 (DISABLED: inside the #if 0 region).
 * Non-zero-context twin of hcall_mmu_tsb_ctx0: identical
 * zero-then-copy-then-validate-then-program flow, but against the
 * CPU_NTSBS_CTXN / CPU_TSBDS_CTXN fields and the CTXN hardware regs.
 * Keep the two routines in sync when changing either.
 */
984/*
985 * mmu_tsb_ctxnon0
986 *
987 * arg0 ntsb (%o0)
988 * arg1 tsbs (%o1)
989 * --
990 * ret0 status (%o0)
991 */
992 ENTRY_NP(hcall_mmu_tsb_ctxnon0)
993 CPU_GUEST_STRUCT(%g5, %g6)
994 INC_MMU_STAT(%g5, MMUSTAT_SETN0, %g2, %g3)
995 /* set cpu->ntsbs to zero now in case we error exit */
996 stx %g0, [%g5 + CPU_NTSBS_CTXN]
997 /* Also zero out H/W bases */
998 ba set_dummytsb_ctxN
999 rd %pc, %g7 ! linkage for callee return (ba/rd %pc call idiom)
1000 brz,pn %o0, setntsbsN ! ntsb == 0: just leave TSBs unconfigured
1001 cmp %o0, MAX_NTSB
1002 bgu,pn %xcc, herr_inval
1003 btst TSBD_ALIGNMENT - 1, %o1
1004 bnz,pn %xcc, herr_badalign
1005 sllx %o0, TSBD_SHIFT, %g3 ! %g3 = ntsb * TSBD_BYTES
1006 RANGE_CHECK(%g6, %o1, %g3, herr_noraddr, %g2)
1007 /* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
1008 REAL_OFFSET(%g6, %o1, %g1, %g2)
1009 add %g5, CPU_TSBDS_CTXN, %g2
1010 ! xcopy trashes g1-4
1011 ba xcopy
1012 rd %pc, %g7
1013 /* loop over each TSBD and validate */
1014 mov %o0, %g1 ! %g1 = TSBDs remaining
1015 add %g5, CPU_TSBDS_CTXN, %g2 ! %g2 = current TSBD
10161:
1017 /* check pagesize - accept any size encoding? XXX */
1018 /* XXX pageszidx is lowest-order bit of pageszmask */
1019 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
1020 cmp %g3, NPGSZ
1021 bgeu,pn %xcc, herr_badpgsz
1022 nop
1023 /* check associativity - only support 1-way */
1024 lduh [%g2 + TSBD_ASSOC_OFF], %g3
1025 cmp %g3, 1
1026 bne,pn %icc, herr_badtsb
1027 nop
1028 /* check TSB size */
1029 ld [%g2 + TSBD_SIZE_OFF], %g3
1030 sub %g3, 1, %g4
1031 btst %g3, %g4 ! power-of-two check: n & (n-1)
1032 bnz,pn %icc, herr_badtsb
1033 mov TSB_SZ0_ENTRIES, %g4
1034 cmp %g3, %g4
1035 blt,pn %icc, herr_badtsb ! below minimum entry count
1036 sll %g4, TSB_MAX_SZCODE, %g4
1037 cmp %g3, %g4
1038 bgt,pn %icc, herr_badtsb ! above maximum entry count
1039 nop
1040 /* check context index field - must be -1 (shared) or zero */
1041 ld [%g2 + TSBD_CTX_INDEX], %g3
1042 cmp %g3, TSBD_CTX_IDX_SHARE
1043 be %icc, 2f ! -1 is OK
1044 nop
1045 brnz,pn %g3, herr_inval ! only one set of context regs
1046 nop
10472:
1048 /* check reserved field - must be zero for now */
1049 ldx [%g2 + TSBD_RSVD_OFF], %g3
1050 brnz,pn %g3, herr_inval
1051 nop
1052 /* check TSB base real address */
1053 ldx [%g2 + TSBD_BASE_OFF], %g3
1054 ld [%g2 + TSBD_SIZE_OFF], %g4
1055 sllx %g4, TSBE_SHIFT, %g4 ! %g4 = TSB size in bytes
1056 RANGE_CHECK(%g6, %g3, %g4, herr_noraddr, %g7)
1057 /* range OK, check alignment */
1058 sub %g4, 1, %g4
1059 btst %g3, %g4 ! base must be aligned to its own size
1060 bnz,pn %xcc, herr_badalign
1061 sub %g1, 1, %g1
1062 brnz,pt %g1, 1b ! next TSBD
1063 add %g2, TSBD_BYTES, %g2
1064
1065#ifdef N2
1066 /* now setup HWTW regs */
1067 /* XXX - only look at first two TSBDs for now */
1068 /* XXX - setup use_context if TSBD context not shared or zero */
1069
1070 /* process first TSBD */
1071 add %g5, CPU_TSBDS_CTXN, %g2
1072 ldx [%g2 + TSBD_BASE_OFF], %g1
1073 REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
1074
1075 ld [%g2 + TSBD_SIZE_OFF], %g4
1076 srl %g4, TSB_SZ0_SHIFT, %g4
10771: ! count log2(entries/SZ0) into low bits of %g1
1078 btst 1, %g4
1079 srl %g4, 1, %g4
1080 bz,a,pt %icc, 1b
1081 add %g1, 1, %g1 ! increment TSB size field
1082
1083 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
1084 sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
1085 or %g1, %g4, %g1 ! add page size field
1086 or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
1087 mov 1, %g4
1088 sllx %g4, 63, %g4
1089 or %g1, %g4, %g1 ! add valid bit
1090
1091 mov TSB_CFG_CTXN_0, %g4
1092 stxa %g1, [%g4]ASI_MMU_TSB ! program HWTW config reg 0
1093
1094 /* process second TSBD, if available */
1095 cmp %o0, 1
1096 be,pt %xcc, 2f ! only one TSBD supplied
1097 add %g2, TSBD_BYTES, %g2 ! move to next TSBD
1098 ldx [%g2 + TSBD_BASE_OFF], %g1
1099 REAL_OFFSET(%g6, %g1, %g1, %g4) ! start with TSB base PA
1100 ld [%g2 + TSBD_SIZE_OFF], %g4
1101 srl %g4, TSB_SZ0_SHIFT, %g4
11021:
1103 btst 1, %g4
1104 srl %g4, 1, %g4
1105 bz,a,pt %icc, 1b
1106 add %g1, 1, %g1 ! increment TSB size field
1107
1108 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g4
1109 sll %g4, TSB_CFG_PGSZ_SHIFT, %g4
1110 or %g1, %g4, %g1 ! add page size field
1111 or %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
1112 mov 1, %g4
1113 sllx %g4, 63, %g4
1114 or %g1, %g4, %g1 ! add valid bit
1115
1116 mov TSB_CFG_CTXN_1, %g4
1117 stxa %g1, [%g4]ASI_MMU_TSB ! program HWTW config reg 1
11182:
1119 stx %o0, [%g5 + CPU_NTSBS_CTXN]
1120#else /* N2 */
1121 /* now setup H/W TSB regs */
1122 /* only look at first two TSBDs for now */
1123 add %g5, CPU_TSBDS_CTXN, %g2
1124 ldx [%g2 + TSBD_BASE_OFF], %g1
1125 REAL_OFFSET(%g6, %g1, %g1, %g4)
1126 ld [%g2 + TSBD_SIZE_OFF], %g4
1127 srl %g4, TSB_SZ0_SHIFT, %g4
11281:
1129 btst 1, %g4
1130 srl %g4, 1, %g4
1131 bz,a,pt %icc, 1b
1132 add %g1, 1, %g1 ! increment TSB size field
1133
1134 stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS0
1135 stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS0
1136
1137 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
1138 stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 only)
1139 stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 only)
1140
1141 /* process second TSBD, if available */
1142 cmp %o0, 1
1143 be,pt %xcc, 2f
1144 add %g2, TSBD_BYTES, %g2 ! move to next TSBD
1145 ldx [%g2 + TSBD_BASE_OFF], %g1
1146 REAL_OFFSET(%g6, %g1, %g1, %g4)
1147 ld [%g2 + TSBD_SIZE_OFF], %g4
1148 srl %g4, TSB_SZ0_SHIFT, %g4
11491:
1150 btst 1, %g4
1151 srl %g4, 1, %g4
1152 bz,a,pt %icc, 1b
1153 add %g1, 1, %g1 ! increment TSB size field
1154
1155 stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS1
1156 stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS1
1157
1158 /* %g3 still has old CONFIG value. */
1159 lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
1160 sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
1161 or %g3, %g7, %g3
1162 stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 + PS1)
1163 stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 + PS1)
1164
11652:
1166 stx %o0, [%g5 + CPU_NTSBS_CTXN]
1167#endif /* N2 */
1168setntsbsN:
1169 clr %o1 ! no return value
1170 HCALL_RET(EOK)
1171 SET_SIZE(hcall_mmu_tsb_ctxnon0)
1172
1173
/*
 * hcall_mmu_tsb_ctx0_info (DISABLED: inside the #if 0 region).
 * Copies the currently-installed context-0 TSBDs back to a guest
 * buffer.  ntsbs is always returned in %o1; EINVAL if the caller's
 * buffer (maxtsbs) is too small, EOK with no copy if ntsbs == 0.
 */
1174/*
1175 * mmu_tsb_ctx0_info
1176 *
1177 * arg0 maxtsbs (%o0)
1178 * arg1 tsbs (%o1)
1179 * --
1180 * ret0 status (%o0)
1181 * ret1 ntsbs (%o1)
1182 */
1183 ENTRY_NP(hcall_mmu_tsb_ctx0_info)
1184 CPU_GUEST_STRUCT(%g5, %g6)
1185 !! %g5 cpup
1186 !! %g6 guestp
1187
1188 ! actual ntsbs always returned in %o1, so save tsbs now
1189 mov %o1, %g4
1190 ! Check to see if ntsbs fits into the supplied buffer
1191 ldx [%g5 + CPU_NTSBS_CTX0], %o1
1192 brz,pn %o1, hret_ok ! nothing configured: return ntsbs = 0
1193 cmp %o1, %o0
1194 bgu,pn %xcc, herr_inval
1195 nop
1196
1197 btst TSBD_ALIGNMENT - 1, %g4
1198 bnz,pn %xcc, herr_badalign
1199 sllx %o1, TSBD_SHIFT, %g3
1200 !! %g3 size of tsbd in bytes
1201 RANGE_CHECK(%g6, %g4, %g3, herr_noraddr, %g2)
1202 REAL_OFFSET(%g6, %g4, %g2, %g1)
1203 !! %g2 pa of buffer
1204 !! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
1205 add %g5, CPU_TSBDS_CTX0, %g1
1206 !! clobbers %g1-%g4
1207 ba xcopy
1208 rd %pc, %g7 ! linkage for xcopy's return
1209
1210 HCALL_RET(EOK)
1211 SET_SIZE(hcall_mmu_tsb_ctx0_info)
1212
1213
1214/*
1215 * mmu_tsb_ctxnon0_info
1216 *
1217 * arg0 maxtsbs (%o0)
1218 * arg1 tsbs (%o1)
1219 * --
1220 * ret0 status (%o0)
1221 * ret1 ntsbs (%o1)
1222 */
	/*
	 * Copy the context-non-zero TSB descriptors previously registered
	 * by the guest back into a guest-supplied buffer.
	 * Mirrors hcall_mmu_tsb_ctx0_info but for the CTXN descriptor set.
	 * In:  %o0 maxtsbs, %o1 tsbs (guest real address)
	 * Out: %o0 status, %o1 ntsbs actually configured
	 */
	ENTRY_NP(hcall_mmu_tsb_ctxnon0_info)
	CPU_GUEST_STRUCT(%g5, %g6)
	!! %g5	cpup
	!! %g6	guestp

	! actual ntsbs always returned in %o1, so save tsbs now
	mov	%o1, %g4
	! Check to see if ntsbs fits into the supplied buffer
	ldx	[%g5 + CPU_NTSBS_CTXN], %o1
	brz,pn	%o1, hret_ok			! nothing configured: EOK, ntsbs=0
	cmp	%o1, %o0
	bgu,pn	%xcc, herr_inval		! buffer too small (ntsbs > maxtsbs)
	nop

	btst	TSBD_ALIGNMENT - 1, %g4		! buffer must be TSBD-aligned
	bnz,pn	%xcc, herr_badalign
	sllx	%o1, TSBD_SHIFT, %g3		! delay slot: ntsbs -> byte count
	!! %g3	size of tsbd in bytes
	RANGE_CHECK(%g6, %g4, %g3, herr_noraddr, %g2)
	REAL_OFFSET(%g6, %g4, %g2, %g1)
	!! %g2	pa of buffer
	!! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
	add	%g5, CPU_TSBDS_CTXN, %g1
	!! clobbers %g1-%g4
	ba	xcopy				! xcopy returns via the saved %pc
	rd	%pc, %g7			! link register for xcopy

	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_tsb_ctxnon0_info)
1252
1253
1254#ifdef MMU_STATS
1255/*
1256 * mmu_stat_area
1257 *
1258 * arg0 raddr (%o0)
1259 * arg1 size (%o1) (bytes)
1260 * --
1261 * ret0 status (%o0)
1262 * ret1 oldraddr (%o1)
1263 */
	/*
	 * Register (or, with raddr==0, unregister) a guest buffer used for
	 * MMU statistics.  Stores both the guest RA and the translated PA
	 * in the per-cpu struct; returns the previous RA in %o1.
	 */
	ENTRY_NP(hcall_mmu_stat_area)
	btst	(MMU_STAT_ALIGNMENT - 1), %o0	! check alignment
	bnz,pn	%xcc, herr_badalign
	cmp	%o1, MMU_STAT_SIZE		! delay slot: size check
	blu,pn	%xcc, herr_inval		! buffer must hold whole stat area
	CPU_GUEST_STRUCT(%g1, %g4)		! delay slot: %g1 cpup, %g4 guestp
	brz,a,pn %o0, 1f			! raddr 0 => disable stats;
	clr	%g2				!   annulled unless taken: pa = 0
	RANGE_CHECK(%g4, %o0, %o1, herr_noraddr, %g3)
	REAL_OFFSET(%g4, %o0, %g2, %g3)		! %g2 = pa of stat area
1:
	ldx	[%g1 + CPU_MMU_STATS_RA], %o1	! ret1 = previous raddr
	stx	%o0, [%g1 + CPU_MMU_STATS_RA]
	stx	%g2, [%g1 + CPU_MMU_STATS]	! pa (or 0) consumed by stat code

	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_stat_area)
1281#endif /* MMU_STATS */
1282
1283/*
1284 * mmu_map_addr - stuff ttes directly into the tlbs
1285 *
1286 * arg0 vaddr (%o0)
1287 * arg1 ctx (%o1)
1288 * arg2 tte (%o2)
1289 * arg3 flags (%o3)
1290 * --
1291 * ret0 status (%o0)
1292 */
	/*
	 * Validate the guest-supplied sun4v TTE, translate its real address
	 * to a physical address (with an I/O-range fallback), then stuff
	 * the resulting TTE directly into the D- and/or I-TLB as selected
	 * by the MAP_DTLB/MAP_ITLB flag bits in %o3.
	 */
	ENTRY_NP(hcall_mmu_map_addr)
	CPU_GUEST_STRUCT(%g1, %g6)

#ifdef STRICT_API
	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o3, herr_inval)
#endif /* STRICT_API */

	! extract sz from tte
	TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
	sub	%g4, 1, %g5			! %g5 page mask

	! extract ra from tte (RA occupies bits 39:13)
	sllx	%o2, 64 - 40, %g2
	srlx	%g2, 64 - 40 + 13, %g2
	sllx	%g2, 13, %g2			! %g2 real address
	xor	%o2, %g2, %g3			! %g3 orig tte with ra field zeroed
	andn	%g2, %g5, %g2			! page-align the RA
	RANGE_CHECK(%g6, %g2, %g4, 3f, %g5)	! not guest memory? try I/O at 3f
	REAL_OFFSET(%g6, %g2, %g2, %g4)		! %g2 = pa
4:	or	%g3, %g2, %g1			! %g1 new tte with pa

#ifndef STRICT_API
#endif /* empty: preprocessor bracketing kept for symmetry with N2HACKS */
	set	(NCTXS - 1), %g3
	and	%o1, %g3, %o1			! mask ctx to valid range
	andn	%o0, %g3, %o0			! mask va to page boundary
#ifdef N2HACKS /* empty: see matching #ifndef above */
#endif /* STRICT_API */
	or	%o0, %o1, %g2			! %g2 tag
	mov	MMU_TAG_ACCESS, %g3		! %g3 tag_access
#ifndef N2
	mov	1, %g4
	sllx	%g4, NI_TTE4V_L_SHIFT, %g4
	andn	%g1, %g4, %g1			! %g1 tte (force clear lock bit)
#endif /* N2 */
#ifdef MMU_STATS
	CPU_STRUCT(%g5)
	and	%o2, TTE_SZ_MASK, %g7
	sllx	%g7, 3, %g7			! * _MMUSONE_MAPx_INCR
	brnz,a	%o1, 9f				! ctx != 0: count as MAPN0;
	add	%g7, MMUSTAT_I+_MMUSONE_MAPN0, %g7	!   (annulled if ctx==0)
	add	%g7, MMUSTAT_I+_MMUSONE_MAP0, %g7	! ctx == 0 counter
9:
	INC_MMU_STAT(%g5, %g7, %g4, %g6)
	! XXXQ need to do MMUSTAT_D, check %o3
#endif /* MMU_STATS */
#ifndef N2
	set	TLB_IN_4V_FORMAT, %g5		! %g5 sun4v-style tte selection
#endif /* N2 */

	! Flag dispatch: test DTLB now, leave the ITLB test result in the
	! condition codes across the DTLB insert below.
	btst	MAP_DTLB, %o3
	bz	2f
	btst	MAP_ITLB, %o3			! delay slot: CCs used at 2:

	stxa	%g2, [%g3]ASI_DMMU		! set D-tag-access
	membar	#Sync
#ifdef N2
	stxa	%g1, [%g0]ASI_DTLB_DATA_IN
#else /* N2 */
	stxa	%g1, [%g5]ASI_DTLB_DATA_IN
#endif /* N2 */
	! condition codes still set
2:	bz	1f				! no MAP_ITLB: done
	nop

	stxa	%g2, [%g3]ASI_IMMU		! set I-tag-access
	membar	#Sync
#ifdef N2
	stxa	%g1, [%g0]ASI_ITLB_DATA_IN
#else /* N2 */
	stxa	%g1, [%g5]ASI_ITLB_DATA_IN
#endif /* N2 */

1:	HCALL_RET(EOK)

	! Check for I/O: RA did not fall in guest memory; accept known
	! I/O apertures, rejoining the main path at 4b with %g2 = pa.
3:
#ifdef CONFIG_IOBYPASS
	RANGE_CHECK_IO(%g6, %g2, %g4, 4b, 1f, %g1, %g5)
	ba,a	4b
#else
1:
	IN_RANGE(%g1, %g2, %g4, FIRE_A_BASE0, FIRE_A_OFFSET0, FIRE_A_SIZE0,
		1f, %g5, %g6)
	ba,pt	%xcc, 4b
	mov	%g4, %g2			! delay slot: pa from IN_RANGE
1:
	IN_RANGE(%g1, %g2, %g4, FIRE_B_BASE0, FIRE_B_OFFSET0, FIRE_B_SIZE0,
		1f, %g5, %g6)
	ba,pt	%xcc, 4b
	mov	%g4, %g2
1:
	IN_RANGE(%g1, %g2, %g4, FIRE_A_BASE1, FIRE_A_OFFSET1, FIRE_A_SIZE1,
		1f, %g5, %g6)
	ba,pt	%xcc, 4b
	mov	%g4, %g2
1:
	IN_RANGE(%g1, %g2, %g4, FIRE_B_BASE1, FIRE_B_OFFSET1, FIRE_B_SIZE1,
		1f, %g5, %g6)
	ba,pt	%xcc, 4b
	mov	%g4, %g2
#endif /* CONFIG_IOBYPASS */
1:
	ba,a	herr_noraddr			! not memory, not known I/O
	SET_SIZE(hcall_mmu_map_addr)
1399
1400
1401/*
1402 * mmu_unmap_addr
1403 *
1404 * arg0 vaddr (%o0)
1405 * arg1 ctx (%o1)
1406 * arg2 flags (%o2)
1407 * --
1408 * ret0 status (%o0)
1409 */
	/*
	 * Demap one page from the I- and/or D-TLB (flags in %o2) by
	 * temporarily switching the primary context to the guest's ctx,
	 * issuing page demaps, and restoring the original context.
	 */
	ENTRY_NP(hcall_mmu_unmap_addr)
#ifdef STRICT_API
	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o2, herr_inval)
#endif /* STRICT_API */
	mov	MMU_PCONTEXT, %g1
	set	(NCTXS - 1), %g2		! 8K page mask
	andn	%o0, %g2, %g2			! %g2 = page-aligned demap addr
	ldxa	[%g1]ASI_MMU, %g3		! save current primary ctx
	stxa	%o1, [%g1]ASI_MMU		! switch to new ctx
	btst	MAP_ITLB, %o2
	bz,pn	%xcc, 1f
	btst	MAP_DTLB, %o2			! delay slot: CCs used at 1:
	stxa	%g0, [%g2]ASI_IMMU_DEMAP
1:	bz,pn	%xcc, 2f			! no MAP_DTLB: skip D demap
	nop
	stxa	%g0, [%g2]ASI_DMMU_DEMAP
2:	stxa	%g3, [%g1]ASI_MMU		! restore original primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_unmap_addr)
1430
1431
1432/*
1433 * mmu_demap_page
1434 *
1435 * arg0/1 cpulist (%o0/%o1)
1436 * arg2 vaddr (%o2)
1437 * arg3 ctx (%o3)
1438 * arg4 flags (%o4)
1439 * --
1440 * ret0 status (%o0)
1441 */
	/*
	 * Demap one page on the *local* cpu only: a non-empty cpulist
	 * (arg0/arg1) is rejected with ENOTSUPPORTED.  Same primary-ctx
	 * save/switch/demap/restore sequence as hcall_mmu_unmap_addr.
	 */
	ENTRY_NP(hcall_mmu_demap_page)
	orcc	%o0, %o1, %g0
	bnz,pn	%xcc, herr_notsupported	! cpulist not yet supported
#ifdef STRICT_API
	nop				! delay slot when checks follow
	CHECK_VA_CTX(%o2, %o3, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o4, herr_inval)
#endif /* STRICT_API */
	mov	MMU_PCONTEXT, %g1	! (delay slot when !STRICT_API; harmless)
	set	(NCTXS - 1), %g2
	andn	%o2, %g2, %g2		! %g2 = page-aligned demap addr
	ldxa	[%g1]ASI_MMU, %g3	! save current primary ctx
	stxa	%o3, [%g1]ASI_MMU	! switch to target ctx
	btst	MAP_ITLB, %o4
	bz,pn	%xcc, 1f
	btst	MAP_DTLB, %o4		! delay slot: CCs used at 1:
	stxa	%g0, [%g2]ASI_IMMU_DEMAP
1:	bz,pn	%xcc, 2f
	nop
	stxa	%g0, [%g2]ASI_DMMU_DEMAP
2:	stxa	%g3, [%g1]ASI_MMU	! restore primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_demap_page)
1465
1466
1467/*
1468 * hcall_mmu_demap_ctx
1469 *
1470 * arg0/1 cpulist (%o0/%o1)
1471 * arg2 ctx (%o2)
1472 * arg3 flags (%o3)
1473 * --
1474 * ret0 status (%o0)
1475 */
	/*
	 * Demap an entire context on the local cpu (cpulist unsupported).
	 * Uses the demap-context operation: the demap type is encoded in
	 * the store address (%g3), issued while primary ctx = target ctx.
	 */
	ENTRY_NP(hcall_mmu_demap_ctx)
	orcc	%o0, %o1, %g0
	bnz,pn	%xcc, herr_notsupported	! cpulist not yet supported
#ifdef STRICT_API
	nop				! delay slot when checks follow
	CHECK_CTX(%o2, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o3, herr_inval)
#endif /* STRICT_API */
	set	TLB_DEMAP_CTX_TYPE, %g3	! demap-by-context operation
	mov	MMU_PCONTEXT, %g2
	ldxa	[%g2]ASI_MMU, %g7	! save current primary ctx
	stxa	%o2, [%g2]ASI_MMU	! switch to target ctx
	btst	MAP_ITLB, %o3
	bz,pn	%xcc, 1f
	btst	MAP_DTLB, %o3		! delay slot: CCs used at 1:
	stxa	%g0, [%g3]ASI_IMMU_DEMAP
1:	bz,pn	%xcc, 2f
	nop
	stxa	%g0, [%g3]ASI_DMMU_DEMAP
2:	stxa	%g7, [%g2]ASI_MMU	! restore primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_demap_ctx)
1498
1499
1500/*
1501 * hcall_mmu_demap_all
1502 *
1503 * arg0/1 cpulist (%o0/%o1)
1504 * arg2 flags (%o2)
1505 * --
1506 * ret0 status (%o0)
1507 */
	/*
	 * Demap all TLB entries on the local cpu (cpulist unsupported).
	 * No context switch is needed: demap-all ignores the context.
	 */
	ENTRY_NP(hcall_mmu_demap_all)
	orcc	%o0, %o1, %g0
	bnz,pn	%xcc, herr_notsupported	! cpulist not yet supported
#ifdef STRICT_API
	nop				! delay slot when check follows
	CHECK_MMU_FLAGS(%o2, herr_inval)
#endif /* STRICT_API */
	set	TLB_DEMAP_ALL_TYPE, %g3	! demap-all operation encoded in addr
	btst	MAP_ITLB, %o2
	bz,pn	%xcc, 1f
	btst	MAP_DTLB, %o2		! delay slot: CCs used at 1:
	stxa	%g0, [%g3]ASI_IMMU_DEMAP
1:	bz,pn	%xcc, 2f
	nop
	stxa	%g0, [%g3]ASI_DMMU_DEMAP
2:	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_demap_all)
1525
1526
1527#ifdef N2
1528/*
1529 * mappings: pointer to current mappings, not modified
1530 * vaddr: not modified
1531 * tte: not modified
1532 * scr1, scr2 scr3, scr4: scratch
1533 */
#define UPDATE_PERM_MAPPINGS(mappings, vaddr, tte, scr1, scr2, scr3, scr4) \
	.pushlocals ;\
	/* first, try searching for an existing entry */ ;\
	TTE_SHIFT_NOCHECK(tte, scr1, scr3) ;\
	srlx	vaddr, scr3, scr4 /* scr4 is current tag */ ;\
	mov	((NPERMMAPPINGS - 1) * MAPPING_SIZE), scr1 ;\
1: ;\
	add	mappings, scr1, scr2 ;\
	MUTEX_ENTER(scr2 + MAPPING_LOCK, scr3) ;\
	ldx	[scr2 + MAPPING_TTE], scr3 ;\
	brgez,pn scr3, 2f /* bit 63 clear => slot not valid */ ;\
	ldx	[scr2 + MAPPING_TAG], scr3 /* delay slot: fetch tag */ ;\
	cmp	scr4, scr3 ;\
	be,pn	%xcc, 3f /* tag match: existing mapping */ ;\
	nop ;\
2: ;\
	MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
	subcc	scr1, MAPPING_SIZE, scr1 /* walk table downwards */ ;\
	bgeu,pt	%xcc, 1b ;\
	nop ;\
3: ;\
	brlz,pn	scr1, 4f /* ? matching entry found */ ;\
	nop ;\
	/* found a valid matching entry, update its refcnt */ ;\
	ld	[scr2 + MAPPING_REFCNT], scr3 ;\
	inc	scr3 ;\
	st	scr3, [scr2 + MAPPING_REFCNT] ;\
	MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
	ba,a	9f ;\
4: ;\
	/* second, try searching for a free entry */ ;\
	mov	((NPERMMAPPINGS - 1) * MAPPING_SIZE), scr1 ;\
5: ;\
	add	mappings, scr1, scr2 ;\
	MUTEX_ENTER(scr2 + MAPPING_LOCK, scr3) ;\
	ldx	[scr2 + MAPPING_TTE], scr3 ;\
	brgez,pn scr3, 6f /* invalid slot => candidate free entry */ ;\
	nop ;\
	/* check tag, in case of parallel insert just update refcnt */ ;\
	ldx	[scr2 + MAPPING_TAG], scr3 ;\
	cmp	scr4, scr3 ;\
	be,pn	%xcc, 3b ;\
	nop ;\
	MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
	subcc	scr1, MAPPING_SIZE, scr1 ;\
	bgeu,pt	%xcc, 5b ;\
	nop ;\
6: ;\
	brlz,pn	scr1, herr_inval /* ? free entry found */ ;\
	nop ;\
	/* found a free entry, update its contents (lock still held) */ ;\
	TTE_SHIFT_NOCHECK(tte, scr3, scr4) ;\
	srlx	vaddr, scr3, scr3 ;\
	stx	scr3, [scr2 + MAPPING_TAG] ;\
	stx	tte, [scr2 + MAPPING_TTE] ;\
	mov	1, scr3 ;\
	st	scr3, [scr2 + MAPPING_REFCNT] ;\
	MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
9: ;\
	.poplocals ;
1594
1595#endif /* N2 */
1596
1597/*
1598 * mmu_map_perm_addr
1599 *
1600 * arg0 vaddr (%o0)
1601 * arg1 context (%o1) must be zero
1602 * arg2 tte (%o2)
1603 * arg3 flags (%o3)
1604 * --
1605 * ret0 status (%o0)
1606 */
	/*
	 * Install a permanent (locked) mapping for ctx 0.  Validates the
	 * TTE, records the mapping in the guest's perm-mapping table (and,
	 * on N2, the per-core refcounted tables), then inserts it into the
	 * I- and/or D-TLB per the MAP_ITLB/MAP_DTLB bits of %o3.
	 * ctx (%o1) must be zero.
	 */
	ENTRY_NP(hcall_mmu_map_perm_addr)
	brnz,pn	%o1, herr_inval			! only ctx 0 may be permanent
	CPU_GUEST_STRUCT(%g1, %g6)		! delay slot: %g1 cpup, %g6 guestp

	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o3, herr_inval)

	! extract sz from tte
	TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
	sub	%g4, 1, %g5			! %g5 page mask

	! extract ra from tte (bits 39:13)
	sllx	%o2, 64 - 40, %g2
	srlx	%g2, 64 - 40 + 13, %g2
	sllx	%g2, 13, %g2			! %g2 real address
	xor	%o2, %g2, %g3			! %g3 orig tte with ra field zeroed
	andn	%g2, %g5, %g2			! page-align the RA
	RANGE_CHECK(%g6, %g2, %g4, herr_noraddr, %g5)
	REAL_OFFSET(%g6, %g2, %g2, %g4)
	or	%g3, %g2, %g2			! %g2 new tte with pa XXXshould be %g1

#ifndef N2HACKS
#if 1 /* XXX */
	/*
	 * need to track the mappings, keep track of which cpus have
	 * the same mapping, demap on each core when all strands on
	 * that core have unmapped it
	 */

	/* Search for existing perm mapping; %g6 doubles as the
	 * "last free slot" pointer (0 = none found yet). */
	add	%g6, GUEST_PERM_MAPPINGS, %g1
	mov	((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
	mov	%g0, %g6
	add	%g1, %g3, %g4
5:
	ldx	[%g4 + MAPPING_TTE], %g5
	brlz,pn	%g5, 6f				! bit 63 set => valid, check tag
	nop
	mov	%g4, %g6			! %g6 = last free offset
7:	subcc	%g3, MAPPING_SIZE, %g3		! walk table downwards
	bgeu,pt	%xcc, 5b
	add	%g1, %g3, %g4			! delay slot: next entry addr

	/* End-of-loop */
	brz,pn	%g6, herr_toomany		! No free entry found
	nop

	stx	%o0, [%g6 + MAPPING_VA]
	stx	%o2, [%g6 + MAPPING_TTE]
	ba,pt	%xcc, 8f
	stx	%o3, [%g6 + MAPPING_FLAGS]	! delay slot

6:	/* found a valid mapping, check tag */
	ldx	[%g4 + MAPPING_VA], %g5
	cmp	%g5, %o0
	bne,pt	%xcc, 7b			! different VA: keep scanning
	nop

	/* i-and/or-d? merge new flags into the existing entry */
	ldx	[%g4 + MAPPING_FLAGS], %g5
	or	%g5, %o3, %g5
	stx	%g5, [%g4 + MAPPING_FLAGS]
8:
#endif /* XXX */
#endif /* N2HACKS */

#ifdef N2
	! N2: also refcount the mapping in the per-core I/D tables.
	btst	MAP_ITLB, %o3
	bz,pn	%xcc, 1f
	nop
	CPU2CORE_STRUCT(%g1, %g6)
	add	%g6, CORE_PERM_I_MAPPINGS, %g6
	UPDATE_PERM_MAPPINGS(%g6, %o0, %g2, %g3, %g4, %g5, %g7)
1:
	btst	MAP_DTLB, %o3
	bz,pn	%xcc, 2f
	nop
	CPU2CORE_STRUCT(%g1, %g6)
	add	%g6, CORE_PERM_D_MAPPINGS, %g6
	UPDATE_PERM_MAPPINGS(%g6, %o0, %g2, %g3, %g4, %g5, %g7)
2:
#endif /* N2 */

	mov	%g2, %g1		! XXX tte belongs in %g1 (see above)
	set	(NCTXS - 1), %g2
	andn	%o0, %g2, %g2		! %g2 = tag (va, ctx 0)
	mov	MMU_TAG_ACCESS, %g3
#ifndef N2
	mov	1, %g4
	sllx	%g4, NI_TTE4V_L_SHIFT, %g4
	or	%g1, %g4, %g1		! add lock bit
#endif /* N2 */
#ifdef MMU_STATS
	CPU_STRUCT(%g5)
	and	%o2, TTE_SZ_MASK, %g7
	sllx	%g7, 3, %g7		! * _MMUSONE_MAPx_INCR
	brnz,a	%o1, 9f			! (ctx is 0 here; kept for symmetry)
	add	%g7, MMUSTAT_I+_MMUSONE_MAPN0, %g7
	add	%g7, MMUSTAT_I+_MMUSONE_MAP0, %g7
9:
	INC_MMU_STAT(%g5, %g7, %g4, %g6)
	! XXXQ need to do MMUSTAT_D, check %o3
	! XXXQ separate counts for perm?
#endif /* MMU_STATS */
#ifndef N2
	set	TLB_IN_4V_FORMAT, %g5	! sun4v-style tte selection
#endif /* N2 */

	! Insert into TLBs; ITLB test result carried in CCs to label 1.
	btst	MAP_ITLB, %o3
	bz,pn	%xcc, 1f
	btst	MAP_DTLB, %o3		! delay slot: CCs used at 1:
	stxa	%g2, [%g3]ASI_IMMU
	membar	#Sync
#ifdef N2
	stxa	%g1, [%g0]ASI_ITLB_DATA_IN
#else /* N2 */
	stxa	%g1, [%g5]ASI_ITLB_DATA_IN
#endif /* N2 */
	membar	#Sync
	! condition codes still set
1:	bz,pn	%xcc, 2f
	nop
	stxa	%g2, [%g3]ASI_DMMU
	membar	#Sync
#ifdef N2
	stxa	%g1, [%g0]ASI_DTLB_DATA_IN
#else /* N2 */
	stxa	%g1, [%g5]ASI_DTLB_DATA_IN
#endif /* N2 */
	membar	#Sync
2:
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_map_perm_addr)
1740
1741
1742#ifdef N2
1743/*
1744 * mappings: pointer to current mappings, not modified
1745 * vaddr: not modified
1746 * scr1, scr2, scr3, scr4: scratch
1747 */
#define UNMAP_PERM_MAPPINGS(mappings, vaddr, scr1, scr2, scr3, scr4) \
	/* XXX - ignore context */ ;\
	.pushlocals ;\
	mov	((NPERMMAPPINGS - 1) * MAPPING_SIZE), scr1 ;\
1: ;\
	add	mappings, scr1, scr2 ;\
	MUTEX_ENTER(scr2 + MAPPING_LOCK, scr3) ;\
	ldx	[scr2 + MAPPING_TTE], scr3 ;\
	brgez,pn scr3, 2f /* bit 63 clear => slot not valid */ ;\
	nop ;\
	TTE_SHIFT_NOCHECK(scr3, scr2, scr4) /* scr2 reused as shift */ ;\
	srlx	vaddr, scr2, scr4 /* scr4 = tag for this page size */ ;\
	add	mappings, scr1, scr2 /* recompute entry addr */ ;\
	ldx	[scr2 + MAPPING_TAG], scr3 ;\
	cmp	scr4, scr3 ;\
	be,pn	%xcc, 3f ;\
	nop ;\
2: ;\
	MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
	subcc	scr1, MAPPING_SIZE, scr1 /* walk table downwards */ ;\
	bgeu,pt	%xcc, 1b ;\
	nop ;\
3: ;\
	brlz,pn	scr1, herr_nomap /* ? matching entry found */ ;\
	nop ;\
	ld	[scr2 + MAPPING_REFCNT], scr3 ;\
	deccc	scr3 ;\
	bnz	%icc, 4f /* still referenced: keep entry */ ;\
	st	scr3, [scr2 + MAPPING_REFCNT] /* delay slot */ ;\
	stx	%g0, [scr2 + MAPPING_TTE] /* refcnt 0: invalidate slot */ ;\
4: ;\
	MUTEX_EXIT(scr2 + MAPPING_LOCK) ;\
	.poplocals ;
1781
1782#endif /* N2 */
1783
1784/*
1785 * mmu_unmap_perm_addr
1786 *
1787 * arg0 vaddr (%o0)
1788 * arg1 ctx (%o1)
1789 * arg2 flags (%o2)
1790 * --
1791 * ret0 status (%o0)
1792 */
	/*
	 * Remove a permanent mapping: drop the per-core refcounts (N2),
	 * then demap the page from the selected TLBs under the target ctx.
	 */
	ENTRY_NP(hcall_mmu_unmap_perm_addr)
	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o2, herr_inval)
#if 0 /* XXX Need to update the list of perm mappings */
	! if no mapping found, return ENOMAP
#endif
#ifdef N2
	btst	MAP_ITLB, %o2
	bz,pn	%xcc, 1f
	nop
	CORE_STRUCT(%g6)
	add	%g6, CORE_PERM_I_MAPPINGS, %g6
	UNMAP_PERM_MAPPINGS(%g6, %o0, %g1, %g3, %g4, %g5)
1:	btst	MAP_DTLB, %o2
	bz,pn	%xcc, 2f
	nop
	CORE_STRUCT(%g6)
	add	%g6, CORE_PERM_D_MAPPINGS, %g6
	UNMAP_PERM_MAPPINGS(%g6, %o0, %g1, %g3, %g4, %g5)
2:
#endif /* N2 */
	! Demap under the target context (same pattern as mmu_unmap_addr).
	mov	MMU_PCONTEXT, %g1
	set	(NCTXS - 1), %g2		! 8K page mask
	andn	%o0, %g2, %g2			! %g2 = page-aligned demap addr
	ldxa	[%g1]ASI_MMU, %g3		! save current primary ctx
	stxa	%o1, [%g1]ASI_MMU		! switch to new ctx
	btst	MAP_ITLB, %o2
	bz,pn	%xcc, 1f
	btst	MAP_DTLB, %o2			! delay slot: CCs used at 1:
	stxa	%g0, [%g2]ASI_IMMU_DEMAP
1:	bz,pn	%xcc, 2f
	nop
	stxa	%g0, [%g2]ASI_DMMU_DEMAP
2:	stxa	%g3, [%g1]ASI_MMU		! restore original primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_unmap_perm_addr)
1829
1830
1831/*
1832 * qconf
1833 *
1834 * arg0 queue (%o0)
1835 * arg1 base raddr (%o1)
1836 * arg2 size (#entries, not #bytes) (%o2)
1837 * --
1838 * ret0 status (%o0)
1839 */
	/*
	 * Configure one of the four per-cpu queues (cpu mondo, dev mondo,
	 * resumable / non-resumable error).  Validates size and alignment,
	 * translates the base RA to a PA, stores base/size/mask in the cpu
	 * struct, and zeroes the queue's head/tail registers.
	 * size == 0 unconfigures the queue.
	 */
	ENTRY_NP(hcall_qconf)
	sllx	%o2, Q_EL_SIZE_SHIFT, %g4	! convert #entries to bytes
	CPU_STRUCT(%g1)

	! size of 0 unconfigures queue
	brnz,pt	%o2, 1f
	nop

	/*
	 * Set the stored configuration to relatively safe values
	 * when un-initializing the queue
	 */
	mov	-1, %g2				! base pa = -1
	mov	-1, %o1				! base ra = -1
	ba,pt	%xcc, 2f
	mov	0, %g4				! delay slot: mask = 0

1:
	cmp	%o2, MIN_QUEUE_ENTRIES
	blu,pn	%xcc, herr_inval
	.empty					! delay slot intentionally empty

	cmp	%o2, MAX_QUEUE_ENTRIES
	bgu,pn	%xcc, herr_inval
	.empty

	! check that size is a power of two
	sub	%o2, 1, %g2
	andcc	%o2, %g2, %g0
	bnz,pn	%xcc, herr_inval
	.empty

	! Check base raddr alignment (must be size-aligned)
	sub	%g4, 1, %g2			! size in bytes to mask
	btst	%o1, %g2
	bnz,pn	%xcc, herr_badalign
	.empty

	ldx	[%g1 + CPU_GUEST], %g6
	RANGE_CHECK(%g6, %o1, %g4, herr_noraddr, %g2)
	REAL_OFFSET(%g6, %o1, %g2, %g3)

	! %g2 - queue paddr
	! %g4 - queue size (#bytes)
	dec	%g4
	! %g4 - queue mask

2:
	! Dispatch on queue id; fall through to herr_inval for unknown ids.
	cmp	%o0, CPU_MONDO_QUEUE
	be,pn	%xcc, qconf_cpuq
	cmp	%o0, DEV_MONDO_QUEUE		! delay slot: next compare
	be,pn	%xcc, qconf_devq
	cmp	%o0, ERROR_RESUMABLE_QUEUE
	be,pn	%xcc, qconf_errrq
	cmp	%o0, ERROR_NONRESUMABLE_QUEUE
	bne,pn	%xcc, herr_inval
	nop

qconf_errnrq:
	stx	%g2, [%g1 + CPU_ERRQNR_BASE]
	stx	%o1, [%g1 + CPU_ERRQNR_BASE_RA]
	stx	%o2, [%g1 + CPU_ERRQNR_SIZE]
	stx	%g4, [%g1 + CPU_ERRQNR_MASK]
	mov	ERROR_NONRESUMABLE_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE		! reset head
	mov	ERROR_NONRESUMABLE_QUEUE_TAIL, %g3
	ba,pt	%xcc, 4f
	stxa	%g0, [%g3]ASI_QUEUE		! delay slot: reset tail

qconf_errrq:
	stx	%g2, [%g1 + CPU_ERRQR_BASE]
	stx	%o1, [%g1 + CPU_ERRQR_BASE_RA]
	stx	%o2, [%g1 + CPU_ERRQR_SIZE]
	stx	%g4, [%g1 + CPU_ERRQR_MASK]
	mov	ERROR_RESUMABLE_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	ERROR_RESUMABLE_QUEUE_TAIL, %g3
	ba,pt	%xcc, 4f
	stxa	%g0, [%g3]ASI_QUEUE

qconf_devq:
	stx	%g2, [%g1 + CPU_DEVQ_BASE]
	stx	%o1, [%g1 + CPU_DEVQ_BASE_RA]
	stx	%o2, [%g1 + CPU_DEVQ_SIZE]
	stx	%g4, [%g1 + CPU_DEVQ_MASK]
	mov	DEV_MONDO_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	DEV_MONDO_QUEUE_TAIL, %g3
	ba,pt	%xcc, 4f
	stxa	%g0, [%g3]ASI_QUEUE

qconf_cpuq:
	stx	%g2, [%g1 + CPU_CPUQ_BASE]
	stx	%o1, [%g1 + CPU_CPUQ_BASE_RA]
	stx	%o2, [%g1 + CPU_CPUQ_SIZE]
	stx	%g4, [%g1 + CPU_CPUQ_MASK]
	mov	CPU_MONDO_QUEUE_HEAD, %g3
	stxa	%g0, [%g3]ASI_QUEUE
	mov	CPU_MONDO_QUEUE_TAIL, %g3
	stxa	%g0, [%g3]ASI_QUEUE		! falls through to 4:

4:
	HCALL_RET(EOK)
	SET_SIZE(hcall_qconf)
1944
1945
1946/*
1947 * qinfo
1948 *
1949 * arg0 queue (%o0)
1950 * --
1951 * ret0 status (%o0)
1952 * ret1 base raddr (%o1)
1953 * ret2 size (#entries) (%o2)
1954 */
	/*
	 * Return the stored base RA and size (#entries) for one of the
	 * four per-cpu queues.  Unknown queue ids return EINVAL.
	 */
	ENTRY_NP(hcall_qinfo)
	CPU_STRUCT(%g1)

	! Dispatch on queue id (same order as hcall_qconf).
	cmp	%o0, CPU_MONDO_QUEUE
	be,pn	%xcc, qinfo_cpuq
	cmp	%o0, DEV_MONDO_QUEUE		! delay slot: next compare
	be,pn	%xcc, qinfo_devq
	cmp	%o0, ERROR_RESUMABLE_QUEUE
	be,pn	%xcc, qinfo_errrq
	cmp	%o0, ERROR_NONRESUMABLE_QUEUE
	bne,pn	%xcc, herr_inval
	nop
qinfo_errnrq:
	ldx	[%g1 + CPU_ERRQNR_BASE_RA], %o1
	ba,pt	%xcc, 1f
	ldx	[%g1 + CPU_ERRQNR_SIZE], %o2	! delay slot

qinfo_errrq:
	ldx	[%g1 + CPU_ERRQR_BASE_RA], %o1
	ba,pt	%xcc, 1f
	ldx	[%g1 + CPU_ERRQR_SIZE], %o2

qinfo_devq:
	ldx	[%g1 + CPU_DEVQ_BASE_RA], %o1
	ba,pt	%xcc, 1f
	ldx	[%g1 + CPU_DEVQ_SIZE], %o2

qinfo_cpuq:
	ldx	[%g1 + CPU_CPUQ_BASE_RA], %o1
	ldx	[%g1 + CPU_CPUQ_SIZE], %o2	! falls through to 1:

1:
	HCALL_RET(EOK)
	SET_SIZE(hcall_qinfo)
1989
1990
1991/*
1992 * cpu_start
1993 *
1994 * arg0 cpu (%o0)
1995 * arg1 pc (%o1)
1996 * arg2 rtba (%o2)
1997 * arg3 arg (%o3)
1998 * --
1999 * ret0 status (%o0)
2000 */
	/*
	 * Start a stopped virtual cpu.  Validates cpu id, pc and rtba
	 * (both guest real addresses, suitably aligned), then posts a
	 * STARTGUEST command through the target cpu's mailbox.  The
	 * mailbox is claimed with casx; a busy mailbox => EWOULDBLOCK.
	 */
	ENTRY_NP(hcall_cpu_start)
	CPU_GUEST_STRUCT(%g6, %g7)
	!! %g6 = CPU
	!! %g7 = guest

	cmp	%o0, NCPUS
	bgeu,pn	%xcc, herr_nocpu
	nop

	! Check pc (real) and tba (real) for validity
	RANGE_CHECK(%g7, %o1, INSTRUCTION_SIZE, herr_noraddr, %g1)
	RANGE_CHECK(%g7, %o2, REAL_TRAPTABLE_SIZE, herr_noraddr, %g1)
	btst	(INSTRUCTION_ALIGNMENT - 1), %o1 ! Check pc alignment
	bnz,pn	%xcc, herr_badalign
	set	REAL_TRAPTABLE_SIZE - 1, %g1	! delay slot: rtba align mask
	btst	%o2, %g1			! rtba must be table-size aligned
	bnz,pn	%xcc, herr_badalign
	nop

	! Check current state of requested cpu
	sllx	%o0, 3, %g1			! 8-byte vcpu pointers
	mov	GUEST_VCPUS, %g2
	add	%g1, %g2, %g1			! %g1 = vcpus[n] offset
	ldx	[%g7 + %g1], %g1		! %g1 = guest.vcpus[n]
	brz,pn	%g1, herr_nocpu			! slot empty: no such cpu
	nop
	!! %g1	requested CPU cpu struct

	ldx	[%g1 + CPU_STATUS], %g2
	cmp	%g2, CPU_STATE_STOPPED
	bne,pn	%xcc, herr_inval		! only a stopped cpu can start
	nop

	/* Check to see if the mailbox is available */
	add	%g1, CPU_COMMAND, %g2
	mov	CPU_CMD_BUSY, %g4
	casxa	[%g2]ASI_P, %g0, %g4		! claim: 0 -> BUSY atomically
	brnz,pn	%g4, herr_wouldblock		! was non-zero: someone owns it
	nop

	stx	%o1, [%g1 + CPU_CMD_ARG0]	! pc
	stx	%o2, [%g1 + CPU_CMD_ARG1]	! rtba
	stx	%o3, [%g1 + CPU_CMD_ARG2]	! arg
#ifdef RESETCONFIG_BROKENTICK
	rdpr	%tick, %g2
	stx	%g2, [%g1 + CPU_CMD_ARG3]	! workaround: pass current tick
#endif

	membar	#StoreStore			! args visible before command
	mov	CPU_CMD_STARTGUEST, %g2
	stx	%g2, [%g1 + CPU_COMMAND]

	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_start)
2055
2056
2057/*
2058 * cpu_stop
2059 *
2060 * arg0 cpu (%o0)
2061 * --
2062 * ret0 status (%o0)
2063 */
	! cpu_stop is not implemented yet: always returns EBADTRAP.
	ENTRY_NP(hcall_cpu_stop)
	HCALL_RET(EBADTRAP)		/* XXX */
	SET_SIZE(hcall_cpu_stop)
2067
2068/*
2069 * cpu_state
2070 *
2071 * arg0 cpu (%o0)
2072 * --
2073 * ret0 status (%o0)
2074 * ret1 state (%o1)
2075 */
	/*
	 * Return the API-visible state of a virtual cpu.  Internal states
	 * beyond CPU_STATE_LAST_PUBLIC are reported as CPU_STATE_ERROR.
	 */
	ENTRY_NP(hcall_cpu_state)
	GUEST_STRUCT(%g1)
	VCPUID2CPUP(%g1, %o0, %g2, herr_nocpu, %g3)
	!! %g2 = target cpup

	ldx	[%g2 + CPU_STATUS], %o1
	! ASSERT(%o1 != CPU_STATE_INVALID)
	cmp	%o1, CPU_STATE_LAST_PUBLIC
	movgu	%xcc, CPU_STATE_ERROR, %o1	! Any non-API state is ERROR
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_state)
2087
2088
2089/*
2090 * hcall_mem_scrub
2091 *
2092 * arg0 real address (%o0)
2093 * arg1 length (%o1)
2094 * --
2095 * ret0 status (%o0)
2096 * EOK : success or partial success
2097 * ENORADDR : invalid (bad) address
2098 * EBADALIGN : bad alignment
2099 * ret1 length scrubbed (%o1)
2100 */
	/*
	 * Zero (scrub) up to CONFIG_MEMSCRUB_MAX L2-line-sized chunks of
	 * guest real memory using block-init stores.  Partial success is
	 * allowed: ret1 reports how many bytes were actually scrubbed.
	 */
	ENTRY_NP(hcall_mem_scrub)
	brz,pn	%o1, herr_inval		! length 0 invalid
	or	%o0, %o1, %g1		! delay slot: address | length
	btst	L2_LINE_SIZE - 1, %g1	! both must be L2-line aligned
	bnz,pn	%xcc, herr_badalign	! no: error
	nop

	CPU_GUEST_STRUCT(%g6, %g5)

	/* Check input arguments with guest map: error ret: r0=ENORADDR */
	RANGE_CHECK(%g5, %o0, %o1, herr_noraddr, %g1)
	REAL_OFFSET(%g5, %o0, %o0, %g1)	/* real => physical address */

	/* Get Max length: */
	ldx	[%g6 + CPU_ROOT], %g2	! root (config) struct
	ldx	[%g2 + CONFIG_MEMSCRUB_MAX], %g5 ! limit (# cache lines)

	/* Compute max # lines: clamp request to the configured limit */
	srlx	%o1, L2_LINE_SHIFT, %g2	! # input cache lines
	cmp	%g5, %g2		! g2 = min(inp, max)
	movlu	%xcc, %g5, %g2		! ..
	sllx	%g2, L2_LINE_SHIFT, %o1	! ret1 = count scrubbed

	/*
	 * This is the core of this function.
	 * All of the code before and after has been optimized to make this
	 * and the most common path the fastest.
	 */
	wr	%g0, ASI_BLK_INIT_P, %asi	! block-init stores avoid RMW
.ms_clear_mem:
	stxa	%g0, [%o0 + (0 * 8)]%asi	! one 64-byte L2 line per pass
	stxa	%g0, [%o0 + (1 * 8)]%asi
	stxa	%g0, [%o0 + (2 * 8)]%asi
	stxa	%g0, [%o0 + (3 * 8)]%asi
	stxa	%g0, [%o0 + (4 * 8)]%asi
	stxa	%g0, [%o0 + (5 * 8)]%asi
	stxa	%g0, [%o0 + (6 * 8)]%asi
	stxa	%g0, [%o0 + (7 * 8)]%asi
	deccc	1, %g2				! one line done
	bnz,pt	%xcc, .ms_clear_mem
	inc	64, %o0				! delay slot: next line
	HCALL_RET(EOK)			! ret0=status, ret1=count
	SET_SIZE(hcall_mem_scrub)
2144
2145
2146/*
2147 * hcall_mem_sync
2148 *
2149 * arg0 real address (%o0)
2150 * arg1 length (%o1)
2151 * --
2152 * ret0 (%o0):
2153 * EOK : success, partial success
2154 * ENORADDR : bad address
2155 * EBADALIGN : bad alignment
2156 * ret1 (%o1):
2157 * length synced
2158 */
	/*
	 * Flush up to CONFIG_MEMSCRUB_MAX L2 lines of guest real memory to
	 * DRAM by calling l2_flush_line per line.  Partial success allowed;
	 * ret1 reports the length actually synced.
	 */
	ENTRY_NP(hcall_mem_sync)
	brz,pn	%o1, herr_inval		! len 0 not valid
	or	%o0, %o1, %g2		! delay slot: address | length
	set	MEMSYNC_ALIGNMENT - 1, %g3
	btst	%g3, %g2		! check for alignment of addr/len
	bnz,pn	%xcc, herr_badalign
	.empty				! delay slot intentionally empty

	CPU_STRUCT(%g5)
	RANGE_CHECK(%g5, %o0, %o1, herr_noraddr, %g1)
	REAL_OFFSET(%g5, %o0, %o0, %g1)	/* real => physical address ? */
	!! %o0	pa
	!! %o1	length

	/*
	 * Clamp requested length at MEMSCRUB_MAX
	 */
	ldx	[%g5 + CPU_ROOT], %g2
	ldx	[%g2 + CONFIG_MEMSCRUB_MAX], %g3
	sllx	%g3, L2_LINE_SHIFT, %g3
	cmp	%o1, %g3
	movgu	%xcc, %g3, %o1
	!! %o1	MIN(requested length, max length)

	/*
	 * Push cache lines to memory, walking from the last line down.
	 */
	sub	%o1, L2_LINE_SIZE, %o5
	!! %o5	loop counter
	add	%o0, %o5, %g1		! hoisted delay slot (see below)
1:
	ba	l2_flush_line		! flush pa in %g1; returns via %g7
	rd	%pc, %g7		! delay slot: link register
	deccc	L2_LINE_SIZE, %o5	! get to next line
	bgeu,pt	%xcc, 1b
	add	%o0, %o5, %g1		! %g1 is pa to flush

	HCALL_RET(EOK)
	SET_SIZE(hcall_mem_sync)
2198
2199
2200/*
2201 * intr_devino2sysino
2202 *
2203 * arg0 dev handle [dev config pa] (%o0)
2204 * arg1 devino (%o1)
2205 * --
2206 * ret0 status (%o0)
2207 * ret1 sysino (%o1)
2208 *
2209 */
	! Tail-dispatch to the device driver's devino->sysino op for the
	! device identified by the dev handle; the op performs HCALL_RET.
	ENTRY_NP(hcall_intr_devino2sysino)
	JMPL_DEVHANDLE2DEVOP(%o0, DEVOPSVEC_DEVINO2VINO, %g1, %g2, %g3, \
		herr_inval)
	SET_SIZE(hcall_intr_devino2sysino)
2214
2215/*
2216 * intr_getenabled
2217 *
2218 * arg0 sysino (%o0)
2219 * --
2220 * ret0 status (%o0)
2221 * ret1 intr valid state (%o1)
2222 */
	! Tail-dispatch to the per-device getvalid op for this sysino.
	ENTRY_NP(hcall_intr_getenabled)
	JMPL_VINO2DEVOP(%o0, DEVOPSVEC_GETVALID, %g1, %g2, herr_inval)
	SET_SIZE(hcall_intr_getenabled)
2226
2227/*
2228 * intr_setenabled
2229 *
2230 * arg0 sysino (%o0)
2231 * arg1 intr valid state (%o1) 1: Valid 0: Invalid
2232 * --
2233 * ret0 status (%o0)
2234 */
	! Validate the enable value, then tail-dispatch to the per-device
	! setvalid op for this sysino.
	ENTRY_NP(hcall_intr_setenabled)
	cmp	%o1, INTR_ENABLED_MAX_VALUE
	bgu,pn	%xcc, herr_inval		! only 0 or 1 accepted
	nop
	JMPL_VINO2DEVOP(%o0, DEVOPSVEC_SETVALID, %g1, %g2, herr_inval)
	SET_SIZE(hcall_intr_setenabled)
2241
2242/*
2243 * intr_getstate
2244 *
2245 * arg0 sysino (%o0)
2246 * --
2247 * ret0 status (%o0)
2248 * ret1 (%o1) 0: idle 1: received 2: delivered
2249 */
	! Tail-dispatch to the per-device getstate op for this sysino.
	ENTRY_NP(hcall_intr_getstate)
	JMPL_VINO2DEVOP(%o0, DEVOPSVEC_GETSTATE, %g1, %g2, herr_inval)
	SET_SIZE(hcall_intr_getstate)
2253
2254/*
2255 * intr_setstate
2256 *
2257 * arg0 sysino (%o0)
2258 * arg1 (%o1) 0: idle 1: received 2: delivered
2259 * --
2260 * ret0 status (%o0)
2261 */
	! Tail-dispatch to the per-device setstate op for this sysino.
	ENTRY_NP(hcall_intr_setstate)
	JMPL_VINO2DEVOP(%o0, DEVOPSVEC_SETSTATE, %g1, %g2, herr_inval)
	SET_SIZE(hcall_intr_setstate)
2265
2266/*
2267 * intr_gettarget
2268 *
2269 * arg0 sysino (%o0)
2270 * --
2271 * ret0 status (%o0)
2272 * ret1 cpuid (%o1)
2273 */
	! Tail-dispatch to the per-device gettarget op for this sysino.
	ENTRY_NP(hcall_intr_gettarget)
	JMPL_VINO2DEVOP(%o0, DEVOPSVEC_GETTARGET, %g1, %g2, herr_inval)
	SET_SIZE(hcall_intr_gettarget)
2277
2278/*
2279 * intr_settarget
2280 *
2281 * arg0 sysino (%o0)
2282 * arg1 cpuid (%o1)
2283 * --
2284 * ret0 status (%o0)
2285 */
	! Tail-dispatch to the per-device settarget op for this sysino.
	ENTRY_NP(hcall_intr_settarget)
	JMPL_VINO2DEVOP(%o0, DEVOPSVEC_SETTARGET, %g1, %g2, herr_inval)
	SET_SIZE(hcall_intr_settarget)
2289
2290
2291/*
2292 * cpu_yield
2293 *
2294 * --
2295 * ret0 status (%o0)
2296 */
	/*
	 * Yield the calling strand by clearing its ACTIVE bit in the
	 * strand status register.  On Niagara 1.x parts (erratum 39)
	 * the parking is skipped and the call returns EOK immediately.
	 */
	ENTRY_NP(hcall_cpu_yield)
#ifdef NIAGARA_ERRATUM_39
	rdhpr	%hver, %g1
	srlx	%g1, VER_MASK_MAJOR_SHIFT, %g1
	and	%g1, VER_MASK_MAJOR_MASK, %g1
	cmp	%g1, 1			! Check for Niagara 1.x
	bleu,pt	%xcc, hret_ok		! affected part: no-op yield
	nop
#endif
#ifndef N2HACKS
	rd	STR_STATUS_REG, %g1
	! xor ACTIVE to clear it on current strand
	wr	%g1, STR_STATUS_STRAND_ACTIVE, STR_STATUS_REG
#endif /* !N2HACKS */
	! skid: the strand may execute a few more instructions before
	! the deactivation takes effect; pad with nops.
	nop
	nop
	nop
	nop
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_yield)
2318
2319
2320/*
2321 * cpu_myid
2322 *
2323 * --
2324 * ret0 status (%o0)
2325 * ret1 mycpuid (%o1)
2326 */
	! Return the calling strand's virtual cpu id from the cpu struct.
	ENTRY_NP(hcall_cpu_myid)
	CPU_STRUCT(%g1)
	ldub	[%g1 + CPU_VID], %o1	! single byte: vcpu id
	HCALL_RET(EOK)
	SET_SIZE(hcall_cpu_myid)
2332
2333
2334/*
2335 * hcall_niagara_getperf
2336 *
2337 * arg0 JBUS/DRAM performance register ID (%o0)
2338 * --
2339 * ret0 status (%o0)
2340 * ret1 Perf register value (%o1)
2341 */
	/*
	 * Read a JBUS/DRAM performance register.  Access is gated by the
	 * guest's perfreg-accessible flag; the register's physical address
	 * is looked up PC-relative in niagara_perf_paddr_table (16 bytes
	 * per entry: paddr, write-mask).
	 */
	ENTRY_NP(hcall_niagara_getperf)
	! check if JBUS/DRAM perf registers are accessible
	GUEST_STRUCT(%g1)
	set	GUEST_PERFREG_ACCESSIBLE, %g2
	ldx	[%g1 + %g2], %g2
	brz,pn	%g2, herr_noaccess
	.empty				! delay slot intentionally empty

	! check if perfreg within range
	cmp	%o0, NIAGARA_PERFREG_MAX
	bgeu,pn	%xcc, herr_inval
	.empty

	! PC-relative table address: table - here, plus %pc read below
	set	niagara_perf_paddr_table - niagara_getperf_1, %g2
niagara_getperf_1:
	rd	%pc, %g3
	add	%g2, %g3, %g2		! %g2 = &niagara_perf_paddr_table
	sllx	%o0, 4, %o0		! table entry offset (16B entries)
	add	%o0, %g2, %g2
	ldx	[%g2], %g3		! get perf reg paddr
	ldx	[%g3], %o1		! read perf reg
	HCALL_RET(EOK)
	SET_SIZE(hcall_niagara_getperf)
2365
2366/*
2367 * hcall_niagara_setperf
2368 *
2369 * arg0 JBUS/DRAM performance register ID (%o0)
2370 * arg1 perf register value (%o1)
2371 * --
2372 * ret0 status (%o0)
2373 */
	/*
	 * Write a JBUS/DRAM performance register.  Same gating and table
	 * lookup as hcall_niagara_getperf; the value is ANDed with the
	 * per-register write mask (second table word) before the store.
	 */
	ENTRY_NP(hcall_niagara_setperf)
	! check if JBUS/DRAM perf registers are accessible
	GUEST_STRUCT(%g1)
	set	GUEST_PERFREG_ACCESSIBLE, %g2
	ldx	[%g1 + %g2], %g2
	brz,pn	%g2, herr_noaccess
	.empty				! delay slot intentionally empty

	! check if perfreg within range
	cmp	%o0, NIAGARA_PERFREG_MAX
	bgeu,pn	%xcc, herr_inval
	.empty

	! PC-relative table address: table - here, plus %pc read below
	set	niagara_perf_paddr_table - niagara_setperf_1, %g2
niagara_setperf_1:
	rd	%pc, %g3
	add	%g2, %g3, %g2		! %g2 = &niagara_perf_paddr_table
	sllx	%o0, 4, %o0		! table entry offset (16B entries)
	add	%o0, %g2, %g2
	ldx	[%g2], %g3		! get perf reg paddr
	ldx	[%g2+8], %g1		! get perf reg write mask
	and	%g1, %o1, %g1		! apply mask to requested value
	stx	%g1, [%g3]		! write perf reg
	HCALL_RET(EOK)
	SET_SIZE(hcall_niagara_setperf)
2399
2400/*
2401 * Niagara JBUS/DRAM performance register physical address/mask table
2402 * (order must match performance register ID assignment)
2403 */
	/*
	 * Niagara JBUS/DRAM performance register physical address/mask
	 * table: one 16-byte entry per register id, {paddr, write-mask}.
	 * (order must match performance register ID assignment)
	 */
	.section ".text"
	.align	8
niagara_perf_paddr_table:
	.xword	JBI_PERF_CTL, 0xff
	.xword	JBI_PERF_COUNT, 0xffffffffffffffff
	.xword	DRAM_PERF_CTL0, 0xff
	.xword	DRAM_PERF_COUNT0, 0xffffffffffffffff
	.xword	DRAM_PERF_CTL1, 0xff
	.xword	DRAM_PERF_COUNT1, 0xffffffffffffffff
	.xword	DRAM_PERF_CTL2, 0xff
	.xword	DRAM_PERF_COUNT2, 0xffffffffffffffff
	.xword	DRAM_PERF_CTL3, 0xff
	.xword	DRAM_PERF_COUNT3, 0xffffffffffffffff
2417
2418
2419
2420/*
2421 * hcall_ra2pa
2422 *
2423 * arg0 ra (%o0)
2424 * --
2425 * ret0 status (%o0)
2426 * ret1 pa (%o1)
2427 */
	/*
	 * Diagnostic call: translate a guest real address to its physical
	 * address.  Requires the guest's DIAGPRIV flag.
	 */
	ENTRY_NP(hcall_ra2pa)
	GUEST_STRUCT(%g1)
	set	GUEST_DIAGPRIV, %g2
	ldx	[%g1 + %g2], %g2
	brz,pn	%g2, herr_noaccess	! diagnostics not permitted
	nop

	RANGE_CHECK(%g1, %o0, 1, herr_noraddr, %g2)
	REAL_OFFSET(%g1, %o0, %o1, %g2)	! %o1 = pa

	HCALL_RET(EOK)
	SET_SIZE(hcall_ra2pa)
2440
2441
2442/*
2443 * hcall_hexec
2444 *
2445 * arg0 physical address of routine to execute (%o0)
2446 * --
2447 * ret0 status if noaccess, other SEP (somebody else's problem) (%o0)
2448 */
	/*
	 * Diagnostic call: jump to hypervisor-privilege code at a guest-
	 * supplied physical address.  Requires DIAGPRIV; the callee is
	 * responsible for returning with "done".
	 */
	ENTRY_NP(hcall_hexec)
	GUEST_STRUCT(%g1)
	set	GUEST_DIAGPRIV, %g2
	ldx	[%g1 + %g2], %g2
	brz,pn	%g2, herr_noaccess	! diagnostics not permitted
	nop

	jmp	%o0			! transfer control; no return here
	nop
	/* caller executes "done" */
	SET_SIZE(hcall_hexec)
2460
2461
2462/*
2463 * dump_buf_update
2464 *
2465 * arg0 ra of dump buffer (%o0)
2466 * arg1 size of dump buffer (%o1)
2467 * --
2468 * ret0 status (%o0)
2469 * ret1 size on success (%o1), min size on EINVAL
2470 */
	/*
	 * Register (or with size 0 unregister) a guest dump buffer.
	 * Any validation failure leaves the buffer unconfigured; on
	 * EINVAL for a too-small buffer, ret1 carries the minimum size.
	 */
	ENTRY_NP(hcall_dump_buf_update)
	GUEST_STRUCT(%g1)

	/*
	 * XXX What locking is required between multiple strands
	 * XXX making simultaneous conf calls?
	 */

	/*
	 * Any error unconfigures any currently configured dump buf
	 * so set to unconfigured now to avoid special error exit code.
	 */
	set	GUEST_DUMPBUF_SIZE, %g4
	stx	%g0, [%g1 + %g4]
	set	GUEST_DUMPBUF_RA, %g4
	stx	%g0, [%g1 + %g4]
	set	GUEST_DUMPBUF_PA, %g4
	stx	%g0, [%g1 + %g4]

	! Size of 0 unconfigures the dump
	brz,pn	%o1, hret_ok
	nop

	set	DUMPBUF_MINSIZE, %g2
	cmp	%o1, %g2
	blu,a,pn %xcc, herr_inval
	mov	%g2, %o1		! return min size on EINVAL (annulled
					!   unless the branch is taken)

	! Check alignment
	btst	(DUMPBUF_ALIGNMENT - 1), %o0
	bnz,pn	%xcc, herr_badalign
	nop

	RANGE_CHECK(%g1, %o0, %o1, herr_noraddr, %g2)
	REAL_OFFSET(%g1, %o0, %g2, %g3)
	!! %g2	pa of dump buffer
	set	GUEST_DUMPBUF_SIZE, %g4
	stx	%o1, [%g1 + %g4]
	set	GUEST_DUMPBUF_RA, %g4
	stx	%o0, [%g1 + %g4]
	set	GUEST_DUMPBUF_PA, %g4
	stx	%g2, [%g1 + %g4]

	! XXX Need to put something in the buffer
#if 0
	CPU_STRUCT(%g5)
	ldx	[%g5 + CPU_ROOT], %g5
	ldx	[%g5 + CONFIG_VERSION], %g1
	! mov	%g2, %g2
	ldx	[%g5 + CONFIG_VERSIONLEN], %g3
	! ASSERT(%g3 <= [GUEST_DUMPBUF_SIZE])
	ba	xcopy
	rd	%pc, %g7
#endif

	HCALL_RET(EOK)
	SET_SIZE(hcall_dump_buf_update)
2528
2529
/*
 * dump_buf_info - report the currently configured dump buffer
 *
 * --
 * ret0 status (%o0)
 * ret1 current dumpbuf ra (%o1)
 * ret2 current dumpbuf size (%o2)
 *
 * Both values read back as 0 when no buffer is configured (see
 * dump_buf_update, which zeroes them on unconfigure/error).
 */
ENTRY_NP(hcall_dump_buf_info)
        GUEST_STRUCT(%g1)
        set     GUEST_DUMPBUF_SIZE, %g4
        ldx     [%g1 + %g4], %o2
        set     GUEST_DUMPBUF_RA, %g4
        ldx     [%g1 + %g4], %o1
        HCALL_RET(EOK)
        SET_SIZE(hcall_dump_buf_info)
2546
2547
/*
 * cpu_mondo_send - deliver a 64-byte mondo to a list of target cpus
 *
 * arg0/1 cpulist (%o0 = number of entries, %o1 = ra of list)
 * arg2 ptr to 64-byte-aligned data to send (%o2)
 * --
 * ret0 status (%o0)
 *
 * Walks the cpulist; each successfully-sent entry is overwritten with
 * CPULIST_ENTRYDONE.  Busy targets are skipped and cause EWOULDBLOCK
 * to be returned so the caller can retry the remaining entries.
 */
ENTRY(hcall_cpu_mondo_send)
        ! Both the cpulist and the mondo data must be properly aligned;
        ! second btst sits in the first branch's (non-annulled) delay slot
        btst    CPULIST_ALIGNMENT - 1, %o1
        bnz,pn  %xcc, herr_badalign
        btst    MONDO_DATA_ALIGNMENT - 1, %o2
        bnz,pn  %xcc, herr_badalign
        nop

        CPU_GUEST_STRUCT(%g3, %g6)
        !! %g3 cpup
        !! %g6 guestp

        ! %g5 = cpulist size in bytes (#entries << entry-size shift)
        sllx    %o0, CPULIST_ENTRYSIZE_SHIFT, %g5

        RANGE_CHECK(%g6, %o1, %g5, herr_noraddr, %g7)
        REAL_OFFSET(%g6, %o1, %g1, %g7)
        !! %g1 cpulistpa
        RANGE_CHECK(%g6, %o2, MONDO_DATA_SIZE, herr_noraddr, %g7)
        REAL_OFFSET(%g6, %o2, %g2, %g5)
        !! %g2 mondopa

        clr     %g4
        !! %g4 true for EWOULDBLOCK (count of busy targets skipped)
another_cpu:
        ! %o0 = entries remaining; exit loop when exhausted
        deccc   %o0
        blu,pn  %xcc, 1f
        nop                             !! nop
        ldsh    [%g1], %g6
        !! %g6 tcpuid
        ! Skip entries already marked done (e.g. on a retry pass)
        cmp     %g6, CPULIST_ENTRYDONE
        be,a,pn %xcc, another_cpu
        inc     CPULIST_ENTRYSIZE, %g1
        cmp     %g6, NCPUS
        bgeu,pn %xcc, herr_nocpu
        nop
        ! %g6 = byte offset into the guest's vcpu pointer array
#if GUEST_VCPUS_INCR == 8
        sllx    %g6, 3, %g6
#else
        mulx    %g6, GUEST_VCPUS_INCR, %g6
#endif
        ldx     [%g3 + CPU_GUEST], %g5
        add     %g5, GUEST_VCPUS, %g5
        ldx     [%g5 + %g6], %g6
        !! %g6 tcpup (NULL if the vcpu id is not populated)
        brz,pn  %g6, herr_nocpu
        .empty

        cmp     %g3, %g6
        be,pn   %xcc, herr_inval        ! Sending to self is illegal
        nop

        /* Check to see if the mailbox is available: atomically claim it
         * by swapping CPU_CMD_BUSY in if it currently reads 0 */
        add     %g6, CPU_COMMAND, %g5
        mov     CPU_CMD_BUSY, %g7
        casxa   [%g5]ASI_P, %g0, %g7
        brnz,a,pn %g7, another_cpu      ! target is busy, try another
        inc     %g4                     ! (annulled unless taken) count the skip

        /* Copy the mondo data into the target cpu's incoming buffer */
        ldx     [%g2 + 0x00], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG0]
        ldx     [%g2 + 0x08], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG1]
        ldx     [%g2 + 0x10], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG2]
        ldx     [%g2 + 0x18], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG3]
        ldx     [%g2 + 0x20], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG4]
        ldx     [%g2 + 0x28], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG5]
        ldx     [%g2 + 0x30], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG6]
        ldx     [%g2 + 0x38], %g7
        stx     %g7, [%g6 + CPU_CMD_ARG7]
        ! Ensure the data is globally visible before flagging it ready
        membar  #Sync
        mov     CPU_CMD_GUESTMONDO_READY, %g7
        stx     %g7, [%g6 + CPU_COMMAND]

        /* Send a xcall vector interrupt to the target cpu */
        ldub    [%g6 + CPU_PID], %g7
        sllx    %g7, INT_VEC_DIS_VCID_SHIFT, %g5
        or      %g5, VECINTR_XCALL, %g5
        stxa    %g5, [%g0]ASI_INTR_UDB_W

        ! Mark this cpulist entry complete
        mov     CPULIST_ENTRYDONE, %g7
        sth     %g7, [%g1]

        ba      another_cpu
        nop
1:
        brnz,pn %g4, herr_wouldblock    ! If remaining then EAGAIN
        nop
        HCALL_RET(EOK)
        SET_SIZE(hcall_cpu_mondo_send)
2650
2651
/*
 * TTRACE_RELOC_ADDR(addr, scr0, scr1)
 *
 * Adjust 'addr' (a link-time address, e.g. produced by setx) for the
 * hypervisor's actual load address: scr1 receives the link-time value
 * of a nearby location and scr0 the run-time %pc, so their difference
 * is the link-vs-load relocation delta, which is subtracted from addr.
 * Clobbers scr0 and scr1.  (NOTE(review): exact correspondence of .+8
 * to the rd %pc site depends on the setx expansion -- confirm.)
 */
#define TTRACE_RELOC_ADDR(addr, scr0, scr1) \
 setx .+8, scr0, scr1 ;\
 rd %pc, scr0 ;\
 sub scr1, scr0, scr0 ;\
 sub addr, scr0, addr
2657
/*
 * hcall_ttrace_buf_conf - configure this cpu's trap trace buffer
 *
 * arg0 ra of traptrace buffer (%o0)
 * arg1 size of traptrace buffer in entries (%o1)
 * --
 * ret0 status (%o0)
 * ret1 minimum #entries on EINVAL, #entries on success (%o1)
 */
ENTRY_NP(hcall_ttrace_buf_conf)
        CPU_GUEST_STRUCT(%g1, %g2)

        /*
         * Disable traptrace by restoring %htba to original traptable
         * always do this first to make error returns easier.
         */
        setx    htraptable, %g3, %g4
        TTRACE_RELOC_ADDR(%g4, %g3, %g5)
        wrhpr   %g4, %htba

        ! Clear buffer description
        stx     %g0, [%g1 + CPU_TTRACEBUF_SIZE] ! size must be cleared first
        stx     %g0, [%g1 + CPU_TTRACEBUF_PA]
        stx     %g0, [%g1 + CPU_TTRACEBUF_RA]

        /*
         * nentries (arg1) > 0 configures the buffer
         * nentries == 0 disables traptrace and cleans up buffer config
         */
        brz,pn  %o1, hret_ok
        nop

        ! Check alignment
        btst    TTRACE_ALIGNMENT - 1, %o0
        bnz,pn  %xcc, herr_badalign
        nop

        ! Check that #entries is >= TTRACE_MINIMUM_ENTRIES
        ! (annulled mov only executes when the branch is taken)
        cmp     %o1, TTRACE_MINIMUM_ENTRIES
        blu,a,pn %xcc, herr_inval
        mov     TTRACE_MINIMUM_ENTRIES, %o1

        sllx    %o1, TTRACE_RECORD_SZ_SHIFT, %g6 ! convert #entries to bytes

        ! Validate [ra, ra+bytes) within guest real memory, translate to PA
        RANGE_CHECK(%g2, %o0, %g6, herr_noraddr, %g4)
        REAL_OFFSET(%g2, %o0, %g3, %g4)
        !! %g3 pa of traptrace buffer
        stx     %o0, [%g1 + CPU_TTRACEBUF_RA]
        stx     %g3, [%g1 + CPU_TTRACEBUF_PA]
        stx     %g6, [%g1 + CPU_TTRACEBUF_SIZE] ! size must be set last

        !! Initialize traptrace buffer header (first record is reserved)
        mov     TTRACE_RECORD_SIZE, %g2
        stx     %g2, [%g1 + CPU_TTRACE_OFFSET]
        stx     %g2, [%g3 + TTRACE_HEADER_OFFSET]
        stx     %g2, [%g3 + TTRACE_HEADER_LAST_OFF]
        ! %o1 return is the same as that passed in
        HCALL_RET(EOK)
        SET_SIZE(hcall_ttrace_buf_conf)
2717
2718
/*
 * hcall_ttrace_buf_info - report this cpu's trap trace buffer config
 *
 * --
 * ret0 status (%o0)
 * ret1 current traptrace buf ra (%o1)
 * ret2 current traptrace buf size in entries (%o2)
 */
ENTRY_NP(hcall_ttrace_buf_info)
        CPU_STRUCT(%g1)

        ldx     [%g1 + CPU_TTRACEBUF_RA], %o1
        ldx     [%g1 + CPU_TTRACEBUF_SIZE], %o2
        srlx    %o2, TTRACE_RECORD_SZ_SHIFT, %o2 ! convert bytes to #entries
        movrz   %o2, %g0, %o1           ! ensure RA zero if size is zero

        HCALL_RET(EOK)
        SET_SIZE(hcall_ttrace_buf_info)
2737
2738
/*
 * hcall_ttrace_enable - switch %htba between the tracing and normal
 * hypervisor trap tables
 *
 * arg0 boolean: 0 = disable, non-zero = enable (%o0)
 * --
 * ret0 status (%o0)
 * ret1 previous enable state (0=disabled, 1=enabled) (%o1)
 *
 * Fails with EINVAL if no trace buffer is configured for this cpu.
 */
ENTRY_NP(hcall_ttrace_enable)
        setx    htraptracetable, %g1, %g2 ! %g2 = reloc'd &htraptracetable
        TTRACE_RELOC_ADDR(%g2, %g1, %g3)

        setx    htraptable, %g1, %g3    ! %g3 = reloc'd &htraptable
        TTRACE_RELOC_ADDR(%g3, %g1, %g4)

        ! Select new table: tracing table when enabling, normal otherwise
        mov     %g3, %g1                ! %g1 = (%o0 ? %g2 : %g3)
        movrnz  %o0, %g2, %g1

        ! Previous state: enabled iff current %htba == &htraptracetable
        rdhpr   %htba, %g4              ! %o1 = (%htba == %g2)
        mov     %g0, %o1
        cmp     %g4, %g2
        move    %xcc, 1, %o1

        /*
         * Check that the guest has previously provided a buf for this cpu
         * Check here since by now %o1 will be properly set
         */
        CPU_STRUCT(%g2)
        TTRACE_CHK_BUF(%g2, %g3, herr_inval)

        wrhpr   %g1, %htba

        HCALL_RET(EOK)
        SET_SIZE(hcall_ttrace_enable)
2773
2774
/*
 * hcall_ttrace_freeze - set the guest-wide trap trace freeze flag
 *
 * arg0 boolean: 0 = unfreeze, non-zero = freeze (%o0)
 * --
 * ret0 status (%o0)
 * ret1 previous freeze state (0=unfrozen, 1=frozen) (%o1)
 */
ENTRY_NP(hcall_ttrace_freeze)
        GUEST_STRUCT(%g1)

        movrnz  %o0, 1, %o0             ! normalize to formal bool (0 or 1)

        ! race conditions for two CPUs updating this not harmful
        ldx     [%g1 + GUEST_TTRACE_FRZ], %o1 ! current val for ret1
        stx     %o0, [%g1 + GUEST_TTRACE_FRZ]

        HCALL_RET(EOK)
        SET_SIZE(hcall_ttrace_freeze)
2794
2795
/*
 * hcall_ttrace_addentry - append a guest-supplied trap trace record
 *
 * arg0 lower 16 bits stored in TTRACE_ENTRY_TAG (%o0)
 * arg1 stored in TTRACE_ENTRY_F1 (%o1)
 * arg2 stored in TTRACE_ENTRY_F2 (%o2)
 * arg3 stored in TTRACE_ENTRY_F3 (%o3)
 * arg4 stored in TTRACE_ENTRY_F4 (%o4)
 * --
 * ret0 status (%o0)
 */
ENTRY_NP(hcall_ttrace_addentry)
        /*
         * Check that the guest has previously provided a buf for this cpu
         * return EINVAL if not configured, ignore (EOK) if frozen
         */
        TTRACE_PTR(%g3, %g2, herr_inval, hret_ok)

        /*
         * Record state at the caller's trap level: temporarily drop %tl
         * by one (clamped to a minimum of 1) so TTRACE_STATE captures
         * the caller's trap registers, then restore it.
         */
        rdpr    %tl, %g4                ! %g4 holds current tl
        sub     %g4, 1, %g3             ! %g3 holds tl of caller
        mov     %g3, %g1                ! save for TL field fixup
        movrz   %g3, 1, %g3             ! minimum is TL=1
        wrpr    %g3, %tl

        TTRACE_STATE(%g2, TTRACE_TYPE_GUEST, %g3, %g5)
        stb     %g1, [%g2 + TTRACE_ENTRY_TL] ! overwrite with calc'd TL

        wrpr    %g4, %tl                ! restore trap level

        ! Fill in the caller-supplied fields of the record
        sth     %o0, [%g2 + TTRACE_ENTRY_TAG]
        stx     %o1, [%g2 + TTRACE_ENTRY_F1]
        stx     %o2, [%g2 + TTRACE_ENTRY_F2]
        stx     %o3, [%g2 + TTRACE_ENTRY_F3]
        stx     %o4, [%g2 + TTRACE_ENTRY_F4]

        ! Advance the buffer to the next record slot
        TTRACE_NEXT(%g2, %g3, %g4, %g5)

        HCALL_RET(EOK)
        SET_SIZE(hcall_ttrace_addentry)
2835
2836
/*
 * hcall_set_rtba - set the current cpu's rtba
 *
 * arg0 rtba (%o0)
 * --
 * ret0 status (%o0)
 * ret1 previous rtba (%o1)
 *
 * The new rtba must lie within the guest's real memory and be aligned
 * to REAL_TRAPTABLE_SIZE.  %o1 is loaded before validation, so the
 * prior value is returned even on the error paths.
 */
ENTRY_NP(hcall_set_rtba)
        CPU_GUEST_STRUCT(%g1, %g2)
        !! %g1 = cpup
        !! %g2 = guestp

        ! Return prior rtba value
        ldx     [%g1 + CPU_RTBA], %o1

        ! Check rtba for validity: range, then natural alignment
        RANGE_CHECK(%g2, %o0, REAL_TRAPTABLE_SIZE, herr_noraddr, %g7)
        set     REAL_TRAPTABLE_SIZE - 1, %g3
        btst    %o0, %g3
        bnz,pn  %xcc, herr_badalign
        nop
        stx     %o0, [%g1 + CPU_RTBA]
        HCALL_RET(EOK)
        SET_SIZE(hcall_set_rtba)
2862
2863
/*
 * hcall_get_rtba - return the current cpu's rtba
 *
 * --
 * ret0 status (%o0)
 * ret1 rtba (%o1)
 */
ENTRY_NP(hcall_get_rtba)
        CPU_STRUCT(%g1)
        ldx     [%g1 + CPU_RTBA], %o1
        HCALL_RET(EOK)
        SET_SIZE(hcall_get_rtba)
2876
2877
2878#ifdef CONFIG_BRINGUP
2879
/*
 * vdev_genintr - generate a virtual interrupt
 *
 * arg0 sysino (%o0)
 * --
 * ret0 status (%o0)
 *
 * Only sysinos that decode to a virtual device (DEVOPS_VDEV) are
 * accepted; anything else returns EINVAL.
 */
ENTRY_NP(hcall_vdev_genintr)
        GUEST_STRUCT(%g1)
        !! %g1 = guestp
        ! Decode device instance from the virtual ino; EINVAL if invalid
        VINO2DEVINST(%g1, %o0, %g2, herr_inval)
        cmp     %g2, DEVOPS_VDEV
        bne,pn  %xcc, herr_inval
        nop
        ! Locate this guest's vdev interrupt mapreg array
        add     %g1, GUEST_VDEV_STATE, %g2
        add     %g2, VDEV_STATE_MAPREG, %g2
        !! %g2 = mapreg array
        ! Index the array by the INO portion of the sysino
        and     %o0, VINTR_INO_MASK, %o0 ! get INO bits
        mulx    %o0, MAPREG_SIZE, %g1
        add     %g2, %g1, %g1
        !! %g1 = mapreg for this INO
        HVCALL(vdev_intr_generate)
        HCALL_RET(EOK)
        SET_SIZE(hcall_vdev_genintr)
2904
2905#endif /* CONFIG_BRINGUP */
2906#endif
2907
2908
! hcall_cons_putchar - write one character (%o0 low byte) to the console
! NOTE(review): 0xfff0c2c000 looks like the testbench console/UART data
! register PA -- confirm against the system address map
hcall_cons_putchar:
        setx    0xfff0c2c000, %g1, %g2
        stb     %o0, [%g2]
        done
! hcall_io_peek - raw 64-bit load from address %o0, result in %o0
! NOTE(review): no range or privilege validation is performed here
hcall_io_peek:
        ldx     [%o0],%o0
        done
! hcall_io_poke - raw 64-bit store of %o1 to address %o0
! NOTE(review): no range or privilege validation is performed here
hcall_io_poke:
        stx     %o1, [%o0]
        done
2919
! hcall_vpci_io_peek - 64-bit little-endian load (ASI_PRIMARY_LITTLE)
! from address %o0, result in %o0; used for PCI I/O space accesses
hcall_vpci_io_peek:
        ldxa    [%o0]ASI_PRIMARY_LITTLE,%o0
        done
! hcall_vpci_io_poke - 64-bit little-endian store (ASI_PRIMARY_LITTLE)
! of %o1 to address %o0; used for PCI I/O space accesses
hcall_vpci_io_poke:
        stxa    %o1, [%o0]ASI_PRIMARY_LITTLE
        done
2926