Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / ontario / src / mmu.s
CommitLineData
920dae64
AT
1/*
2* ========== Copyright Header Begin ==========================================
3*
4* Hypervisor Software File: mmu.s
5*
6* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
7*
8* - Do no alter or remove copyright notices
9*
10* - Redistribution and use of this software in source and binary forms, with
11* or without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistribution of source code must retain the above copyright notice,
15* this list of conditions and the following disclaimer.
16*
17* - Redistribution in binary form must reproduce the above copyright notice,
18* this list of conditions and the following disclaimer in the
19* documentation and/or other materials provided with the distribution.
20*
21* Neither the name of Sun Microsystems, Inc. or the names of contributors
22* may be used to endorse or promote products derived from this software
23* without specific prior written permission.
24*
25* This software is provided "AS IS," without a warranty of any kind.
26* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
27* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
28* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
29* MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
30* ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
31* DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
32* OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
33* FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
34* DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
35* ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
36* SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
37*
38* You acknowledge that this software is not designed, licensed or
39* intended for use in the design, construction, operation or maintenance of
40* any nuclear facility.
41*
42* ========== Copyright Header End ============================================
43*/
44/*
45 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
46 * Use is subject to license terms.
47 */
48
49 .ident "@(#)mmu.s 1.45 07/05/03 SMI"
50
51/*
52 * Niagara mmu code
53 */
54
55#include <sys/asm_linkage.h>
56#include <hprivregs.h>
57#include <asi.h>
58#include <traps.h>
59#include <mmu.h>
60#include <sun4v/traps.h>
61#include <sun4v/mmu.h>
62#include <mmustat.h>
63#include <cpu_errs.h>
64
65#include <guest.h>
66#include <offsets.h>
67#include <debug.h>
68#include <util.h>
69
	/*
	 * rdmmu_miss - DTLB miss taken on a real-address translation.
	 *
	 * In:	%g1 = vcpu struct pointer
	 *	%g2 = 8k-aligned real address from the MMU tag access reg
	 * Clobbers the globals only (trap-handler context).
	 *
	 * Path 1: RA inside a guest memory range -> build TTE (V|CP|P|W),
	 *	   stuff the DTLB via the "real" translation tag, retry.
	 * Path 2: RA inside a guest IO range -> same, but with the E
	 *	   (side-effect) bit instead of CP.
	 * Path 3: RA inside the guest's LDC mapin window -> branch to
	 *	   ldc_dmmu_mapin_ra.
	 * Else:   revector a data access exception to the guest with
	 *	   fault type MMU_FT_INVALIDRA and ctx 0.
	 */
70 ! %g1 vcpup
71 ! %g2 8k-aligned real addr from tag access
72 ENTRY_NP(rdmmu_miss)
73 ! offset handling
74 ! XXX if hypervisor access then panic instead of watchdog_guest
75 VCPU2GUEST_STRUCT(%g1, %g7)
76 set 8 KB, %g5
77 RA2PA_RANGE_CONV(%g7, %g2, %g5, 1f, %g4, %g3)
78 ! %g3 PA
79
80 ! tte valid, cp, writable, priv
81 mov 1, %g2
82 sllx %g2, 63, %g2
83 or %g2, TTE4U_CP | TTE4U_P | TTE4U_W, %g2
84 or %g3, %g2, %g3
85 mov TLB_IN_REAL, %g2 ! Real bit
86 stxa %g3, [%g2]ASI_DTLB_DATA_IN
87 retry
88
	! RA was not covered by a memory range: try the guest's IO ranges
891:
90 GUEST_STRUCT(%g1)
91 set 8192, %g6
92 RANGE_CHECK_IO(%g1, %g2, %g6, .rdmmu_miss_found, .rdmmu_miss_not_found,
93 %g3, %g4)
94.rdmmu_miss_found:
95 mov %g2, %g3
96
97 ! tte valid, e, writable, priv
98 mov 1, %g2
99 sllx %g2, 63, %g2
100 or %g2, TTE4U_E | TTE4U_P | TTE4U_W, %g2
101 or %g3, %g2, %g3
102 mov TLB_IN_REAL, %g2 ! Real bit
103 stxa %g3, [%g2]ASI_DTLB_DATA_IN
104 retry
105
106
107.rdmmu_miss_not_found:
1081:
109 ! FIXME: This test to be subsumed when we fix the RA mappings
110 ! for multiple RA blocks
111 ! %g1 guest struct
112 ! %g2 real address
113 set GUEST_LDC_MAPIN_BASERA, %g7
114 ldx [ %g1 + %g7 ], %g3
115 subcc %g2, %g3, %g4
116 bneg,pn %xcc, 2f
117 nop
118 set GUEST_LDC_MAPIN_SIZE, %g5
119 ldx [ %g1 + %g5 ], %g6
120 subcc %g4, %g6, %g0
121 ! check regs passed in to mapin_ra:
122 bneg,pt %xcc, ldc_dmmu_mapin_ra
123 nop
124
	! Separate entry point so other code can raise the same fault.
125 ENTRY_NP(rdmmu_miss_not_found2)
1262:
127 LEGION_GOT_HERE
128 mov MMU_FT_INVALIDRA, %g1
129 ba,pt %xcc, revec_dax ! (%g1=ft, %g2=addr, %g3=ctx)
130 mov 0, %g3
131 SET_SIZE(rdmmu_miss)
132
	/*
	 * rimmu_miss - ITLB miss taken on a real-address translation.
	 *
	 * In:	%g1 = vcpu struct pointer
	 *
	 * Reads the IMMU tag access register and masks the low 13 bits to
	 * get the 8k-aligned real address.  If the RA falls in a guest
	 * memory range, build a TTE (V|CP|P|W) over the resulting PA and
	 * load it into the ITLB with the "real" translation tag, then
	 * retry.  Otherwise revector an instruction access exception to
	 * the guest with fault type MMU_FT_INVALIDRA and ctx 0.  (No IO
	 * fallback here: instruction fetch from IO space is not mapped.)
	 */
133 !
134 ! %g1 = vcpup
135 !
136 ENTRY_NP(rimmu_miss)
137 mov MMU_TAG_ACCESS, %g2
138 ldxa [%g2]ASI_IMMU, %g2 /* tag access */
139 set ((1 << 13) - 1), %g3
140 andn %g2, %g3, %g2
141
142 VCPU2GUEST_STRUCT(%g1, %g3)
143 RA2PA_RANGE_CONV(%g3, %g2, %g0, 1f, %g4, %g1)
144 ! %g1 PA
145
146 ! tte valid, cp, writable, priv
147 mov 1, %g2
148 sllx %g2, 63, %g2
149 or %g2, TTE4U_CP | TTE4U_P | TTE4U_W, %g2
150 or %g1, %g2, %g1
151 mov TLB_IN_REAL, %g2 ! Real bit
152 stxa %g1, [%g2]ASI_ITLB_DATA_IN
153 retry
154
1551:
156 ! %g2 real address
157 LEGION_GOT_HERE
158 mov MMU_FT_INVALIDRA, %g1
159 ba,pt %xcc, revec_iax ! (%g1=ft, %g2=addr, %g3=ctx)
160 mov 0, %g3
161 SET_SIZE(rimmu_miss)
162
163
164 /*
165 * Normal tlb miss handlers
166 *
167 * Guest miss area:
168 *
169 * NB: If it's possible to context switch a guest then
170 * the tag access register (tag target too?) needs to
171 * be saved/restored.
172 */
173
	/*
	 * immu_miss - ITLB miss handler for guest virtual addresses.
	 *
	 * In:	%g1 = vcpu (per CPU) struct pointer
	 *
	 * Flow:
	 *  1. Save %tick in CPU_SCR0 (used later for MMU-statistics
	 *     timing on a hit).
	 *  2. Probe both guest TSBs (PS0 then PS1) using the hardware
	 *     TSB pointer ASIs and an atomic quad load (tag -> %g6,
	 *     data -> %g7).  For an unshared TSB (TSBD_CTX_INDEX == -1
	 *     test fails) the context field is cleared from the match
	 *     tag first.
	 *  3. Hit (.itsbhit): validate the TTE page size (TTE_SIZE) and
	 *     the X bit, convert the TTE's RA field to a PA via the
	 *     guest's RA->PA ranges, clear the lock bit, then load the
	 *     ITLB using the sun4v-format tag and (if CPU_MMUSTAT_AREA
	 *     is set) account a hit + %tick delta per ctx/pagesize.
	 *  4. Both TSBs missed and ctx == 0: linear-search the guest's
	 *     permanent mappings (.checkipermmaps) against %tpc, and
	 *     only use an entry whose MAPPING_ICPUSET bit for this
	 *     vcpu's CPU_VID is set.
	 *  5. Otherwise (.itsbmiss): hand the miss to the guest - fast
	 *     miss (TT_FAST_IMMU_MISS vector, fault area gets addr+ctx
	 *     only) when the relevant TSB set is unconfigured, slow miss
	 *     (TT_IMMU_MISS, fault area gets addr/ctx/MMU_FT_MISS) when
	 *     it is configured; watchdog_guest when there is no fault
	 *     area or TL exceeds MAXPTL.
	 */
174 /* %g1 contains per CPU area */
175 ENTRY_NP(immu_miss)
176 rd %tick, %g2
177 stx %g2, [%g1 + CPU_SCR0]
178 ldxa [%g0]ASI_IMMU, %g3 /* tag target */
179 srlx %g3, TAGTRG_CTX_RSHIFT, %g4 /* ctx from tag target */
180
181 ! %g1 = CPU pointer
182 ! %g3 = tag target
183 ! %g4 = ctx
184
185.checkitsb0:
186 ! for context != 0 and unshared TSB, that ctx == TSB ctx
187 brz,pn %g4, 1f
188 mov %g3, %g2
189 ld [%g1 + CPU_TSBDS_CTXN + TSBD_CTX_INDEX], %g5
190 cmp %g5, -1
191 be,pn %icc, 1f
192 nop
193 ! if TSB not shared, zero out context for match
194 sllx %g3, TAGTRG_VA_LSHIFT, %g2
195 srlx %g2, TAGTRG_VA_LSHIFT, %g2 ! clear context
1961:
197 ldxa [%g0]ASI_IMMU_TSB_PS0, %g5
198 ! if TSB desc. specifies xor of TSB index, do it here
199 ! e.g. for shared TSBs in S9 xor value is ctx << 4
200 ldda [%g5]ASI_QUAD_LDD, %g6 /* g6 = tag, g7 = data */
201 cmp %g6, %g2
202 bne,pn %xcc, .checkitsb1 ! tag mismatch
203 nop
204 brlz,pt %g7, .itsbhit ! TTE valid
205 nop
206
207.checkitsb1:
208 ! repeat check for second TSB
209 brz,pn %g4, 1f
210 mov %g3, %g2
211 ld [%g1 + CPU_TSBDS_CTXN + TSBD_BYTES + TSBD_CTX_INDEX], %g5
212 cmp %g5, -1
213 be,pn %icc, 1f
214 nop
215 ! if TSB not shared, zero out context for match
216 sllx %g3, TAGTRG_VA_LSHIFT, %g2
217 srlx %g2, TAGTRG_VA_LSHIFT, %g2 ! clear context
2181:
219 ldxa [%g0]ASI_IMMU_TSB_PS1, %g5
220 ! if TSB desc. specifies xor of TSB index, do it here
221 ldda [%g5]ASI_QUAD_LDD, %g6 /* g6 = tag, g7 = data */
222 cmp %g6, %g2
223 bne,pn %xcc, .checkipermmaps ! tag mismatch
224 nop
225 brgez,pn %g7, .checkipermmaps ! TTE valid?
226 nop
227
228.itsbhit:
229 ! extract sz from tte
230 TTE_SIZE(%g7, %g4, %g3, .itsb_inv_pgsz)
231 btst TTE_X, %g7 ! must check X bit for IMMU
232 bz,pn %icc, .itsbmiss
233 sub %g4, 1, %g5 ! %g5 page mask
234
235 ! extract ra from tte
236 sllx %g7, 64 - 40, %g3
237 srlx %g3, 64 - 40 + 13, %g3
238 sllx %g3, 13, %g3 ! %g3 real address
239 xor %g7, %g3, %g7 ! %g7 orig tte with ra field zeroed
240 andn %g3, %g5, %g3
241
242 VCPU2GUEST_STRUCT(%g1, %g6)
243 RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, .itsb_ra_range, %g5, %g1) ! XXX fault not just a miss
244 mov %g1, %g3
245 VCPU_STRUCT(%g1) ! restore vcpu
246
247 or %g7, %g3, %g7 ! %g7 new tte with pa
248
249 CLEAR_TTE_LOCK_BIT(%g7, %g5)
250
251 set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection
252 stxa %g7, [%g5]ASI_ITLB_DATA_IN
253 !
254 ! %g1 = CPU pointer
255 ! %g7 = TTE
256 !
257 ldx [%g1 + CPU_MMUSTAT_AREA], %g6
258 brnz,pn %g6, 1f
259 nop
260
261 retry
262
	! MMU statistics enabled: account hit count and %tick delta,
	! bucketed by instruction side / context class / page size.
2631:
264 rd %tick, %g2
265 ldx [%g1 + CPU_SCR0], %g1
266 sub %g2, %g1, %g5
267 !
268 ! %g5 = %tick delta
269 ! %g6 = MMU statistics area
270 ! %g7 = TTE
271 !
272 inc MMUSTAT_I, %g6 /* stats + i */
273 ldxa [%g0]ASI_IMMU, %g3 /* tag target */
274 srlx %g3, TAGTRG_CTX_RSHIFT, %g4 /* ctx from tag target */
275 mov MMUSTAT_CTX0, %g1
276 movrnz %g4, MMUSTAT_CTXNON0, %g1
277 add %g6, %g1, %g6 /* stats + i + ctx */
278 and %g7, TTE_SZ_MASK, %g7
279 sllx %g7, MMUSTAT_ENTRY_SZ_SHIFT, %g7
280 add %g6, %g7, %g6 /* stats + i + ctx + pgsz */
281 ldx [%g6 + MMUSTAT_TICK], %g3
282 add %g3, %g5, %g3
283 stx %g3, [%g6 + MMUSTAT_TICK]
284 ldx [%g6 + MMUSTAT_HIT], %g3
285 inc %g3
286 stx %g3, [%g6 + MMUSTAT_HIT]
287 retry
288
289 ! %g1 = CPU struct
290 ! %g4 = context
291.checkipermmaps:
292 brnz,pt %g4, .itsbmiss ! only context zero has perm mappings
293 nop
294 VCPU2GUEST_STRUCT(%g1, %g2)
295 mov GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1), %g3
296 add %g3, GUEST_PERM_MAPPINGS, %g3
297 add %g2, %g3, %g2
298 mov -(GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1)), %g3
299 rdpr %tpc, %g4
	! Loop over the perm-mapping array from last entry to first;
	! %g2 points at the last entry, %g3 is a negative offset.
3001:
301 ldda [ %g2 + %g3 ] ASI_QUAD_LDD, %g6 ! Ld TTE (g7) + Tag (g6)
302
303 ! Figure page size match mask
304 ! FIXME: Could speed this by storing the mask ... but
305 ! atomicity problems with storage. Other option is
306 ! store pre-computed page size shift in tag bits 0-13
307 brgez,pn %g7, 2f
308 and %g7, TTE_SZ_MASK, %g5
309 add %g5, %g5, %g1
310 add %g5, %g1, %g1 ! Mult size by 3
311 add %g1, 13, %g1 ! Add 13
312 mov 1, %g5
313 sllx %g5, %g1, %g5 ! Compute bytes per page
314 sub %g5, 1, %g5 ! Page mask for TTE retrieved
315
316 xor %g6, %g4, %g6
317 andncc %g6, %g5, %g0 ! Check for tag match
318
319 beq,pt %xcc, 3f
320 nop
321
3222:
323 brlz,pt %g3, 1b
324 add %g3, GUEST_PERM_MAPPINGS_INCR, %g3
325
326 VCPU_STRUCT(%g1)
327 ba,pt %xcc, .itsbmiss
328 mov %g0, %g4
329
3303:
331 ! Found a matching entry - can we load it into the ITLB
332 VCPU_STRUCT(%g1)
333 add %g2, %g3, %g2 ! Ptr to map entry
334
335 ! Calculate index into perm bit set
336 ldub [%g1 + CPU_VID], %g3
337 and %g3, MAPPING_XWORD_MASK, %g4
338 mov 1, %g5
339 sllx %g5, %g4, %g4 ! Bit in mask
340 srlx %g3, MAPPING_XWORD_SHIFT, %g3
341 sllx %g3, MAPPING_XWORD_BYTE_SHIFT_BITS, %g3
342 add %g2, %g3, %g2
343
344 ldx [%g2 + MAPPING_ICPUSET], %g3
345 btst %g3, %g4
346 bz,pn %xcc, .itsbmiss
347 mov %g0, %g4
348
349 ! Stuff entry - it's already been swizzled
350 set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection
351 stxa %g7, [%g5]ASI_ITLB_DATA_IN
352
353 retry
354
355.itsbmiss:
356 ldx [%g1 + CPU_MMU_AREA], %g2
357 brz,pn %g2, watchdog_guest
358 .empty
359
360 ! %g1 is CPU pointer
361 ! %g2 is MMU Fault Status Area
362 ! %g4 is context (possibly shifted - still OK for zero test)
363 /* if ctx == 0 and ctx0 set TSBs used, take slow trap */
364 /* if ctx != 0 and ctxnon0 set TSBs used, take slow trap */
365 mov CPU_NTSBS_CTXN, %g7
366 movrz %g4, CPU_NTSBS_CTX0, %g7
367 ldx [%g1 + %g7], %g7
368 brnz,pn %g7, .islowmiss
369 nop
370
371.ifastmiss:
372 /*
373 * Update MMU_FAULT_AREA_INSTR
374 */
375 mov MMU_TAG_ACCESS, %g3
376 ldxa [%g3]ASI_IMMU, %g3 /* tag access */
377 set (NCTXS - 1), %g5
378 andn %g3, %g5, %g4
379 and %g3, %g5, %g5
380 stx %g4, [%g2 + MMU_FAULT_AREA_IADDR]
381 stx %g5, [%g2 + MMU_FAULT_AREA_ICTX]
382 /* fast misses do not update MMU_FAULT_AREA_IFT with MMU_FT_FASTMISS */
383 ! wrpr %g0, TT_FAST_IMMU_MISS, %tt /* already set */
384 rdpr %pstate, %g3
385 or %g3, PSTATE_PRIV, %g3
386 wrpr %g3, %pstate
387 rdpr %tba, %g3
388 add %g3, (TT_FAST_IMMU_MISS << TT_OFFSET_SHIFT), %g3
	! Common redirect tail (shared with .islowmiss via 7b):
	! %g3 = target pc in the guest's trap table; bump past the
	! TL=0 table when the trap came from TL>0, give up past MAXPTL.
3897:
390 rdpr %tl, %g2
391 cmp %g2, 1 /* trap happened at tl=0 */
392 be,pt %xcc, 1f
393 .empty
394 set TRAPTABLE_SIZE, %g5
395
396 cmp %g2, MAXPTL
397 bgu,pn %xcc, watchdog_guest
398 add %g5, %g3, %g3
399
4001:
401 mov HPSTATE_GUEST, %g5 ! set ENB bit
402 jmp %g3
403 wrhpr %g5, %hpstate
404
405.islowmiss:
406 /*
407 * Update MMU_FAULT_AREA_INSTR
408 */
409 mov MMU_TAG_TARGET, %g3
410 ldxa [%g3]ASI_IMMU, %g3 /* tag target */
411 srlx %g3, TAGTRG_CTX_RSHIFT, %g3
412 stx %g3, [%g2 + MMU_FAULT_AREA_ICTX]
413 rdpr %tpc, %g4
414 stx %g4, [%g2 + MMU_FAULT_AREA_IADDR]
415 mov MMU_FT_MISS, %g4
416 stx %g4, [%g2 + MMU_FAULT_AREA_IFT]
417 wrpr %g0, TT_IMMU_MISS, %tt
418 rdpr %pstate, %g3
419 or %g3, PSTATE_PRIV, %g3
420 wrpr %g3, %pstate
421 rdpr %tba, %g3
422 add %g3, (TT_IMMU_MISS << TT_OFFSET_SHIFT), %g3
423 ba,a 7b
424 .empty
425
426.itsb_inv_pgsz:
427 /* IAX with FT=Invalid Page Size (15), VA, CTX */
428 ba,pt %xcc, .itsb_iax
429 mov MMU_FT_PAGESIZE, %g3
430
431.itsb_ra_range:
432 /* IAX with FT=Invalid TSB Entry (16), VA, CTX */
433 mov MMU_FT_INVTSBENTRY, %g3
434 /*FALLTHROUGH*/
435
436.itsb_iax:
437 !! %g1 = cpup
438 ldx [%g1 + CPU_MMU_AREA], %g2
439 brz,pn %g2, watchdog_guest ! Nothing we can do about this
440 nop
441 stx %g3, [%g2 + MMU_FAULT_AREA_IFT]
442 mov MMU_TAG_TARGET, %g3
443 ldxa [%g3]ASI_IMMU, %g3 /* tag target */
444 srlx %g3, TAGTRG_CTX_RSHIFT, %g3
445 stx %g3, [%g2 + MMU_FAULT_AREA_ICTX]
446 rdpr %tpc, %g3
447 stx %g3, [%g2 + MMU_FAULT_AREA_IADDR]
448 REVECTOR(TT_IAX)
449 SET_SIZE(immu_miss)
450
451
	/*
	 * dmmu_miss - DTLB miss handler for guest virtual addresses.
	 *
	 * In:	%g1 = vcpu (per CPU) struct pointer
	 *
	 * Mirrors immu_miss for the data side:
	 *  1. Save %tick in CPU_SCR0 for MMU-statistics timing.
	 *  2. Probe both guest TSBs (PS0/PS1) via the hardware TSB
	 *     pointer ASIs + quad load (tag -> %g6, data -> %g7),
	 *     clearing the context from the match tag for unshared TSBs.
	 *  3. Hit (.dtsbhit): validate page size, convert the TTE RA
	 *     field to a PA (no X-bit check on the data side); if the RA
	 *     is not memory, fall to 3f which also tries the guest's IO
	 *     ranges and then the LDC mapin window (ldc_dtsb_hit) before
	 *     declaring .dtsb_ra_range.  Clear the lock bit and load the
	 *     DTLB in sun4v tag format, then optionally account MMU
	 *     statistics.
	 *  4. Both TSBs missed and ctx == 0: search the guest permanent
	 *     mappings (.checkdpermmaps, also the dtsb_miss entry point)
	 *     against the tag access VA, honoring MAPPING_DCPUSET.
	 *  5. Otherwise (.dtsbmiss): redirect to the guest - fast miss
	 *     (TT_FAST_DMMU_MISS) when the relevant TSB set is
	 *     unconfigured, slow miss (TT_DMMU_MISS + MMU_FT_MISS) when
	 *     configured; watchdog_guest when no fault area exists or
	 *     TL exceeds MAXPTL.
	 */
452 /* %g1 contains per CPU area */
453 ENTRY_NP(dmmu_miss)
454 rd %tick, %g2
455 stx %g2, [%g1 + CPU_SCR0]
456 ldxa [%g0]ASI_DMMU, %g3 /* tag target */
457 srlx %g3, TAGTRG_CTX_RSHIFT, %g4 /* ctx from tag target */
458
459 ! %g1 = CPU pointer
460 ! %g3 = tag target
461 ! %g4 = ctx
462
463.checkdtsb0:
464 ! for context != 0 and unshared TSB, that ctx == TSB ctx
465 brz,pn %g4, 1f
466 mov %g3, %g2
467 ld [%g1 + CPU_TSBDS_CTXN + TSBD_CTX_INDEX], %g5
468 cmp %g5, -1
469 be,pn %icc, 1f
470 nop
471 ! if TSB not shared, zero out context for match
472 sllx %g3, TAGTRG_VA_LSHIFT, %g2
473 srlx %g2, TAGTRG_VA_LSHIFT, %g2 ! clear context
4741:
475 ldxa [%g0]ASI_DMMU_TSB_PS0, %g5
476 ! if TSB desc. specifies xor of TSB index, do it here
477 ! e.g. for shared TSBs in S9 xor value is ctx << 4
478 ldda [%g5]ASI_QUAD_LDD, %g6 /* g6 = tag, g7 = data */
479 cmp %g6, %g2
480 bne,pn %xcc, .checkdtsb1 ! tag mismatch
481 nop
482 brlz,pt %g7, .dtsbhit ! TTE valid
483 nop
484
485.checkdtsb1:
486 ! repeat check for second TSB
487 brz,pn %g4, 1f
488 mov %g3, %g2
489 ld [%g1 + CPU_TSBDS_CTXN + TSBD_BYTES + TSBD_CTX_INDEX], %g5
490 cmp %g5, -1
491 be,pn %icc, 1f
492 nop
493 ! if TSB not shared, zero out context for match
494 sllx %g3, TAGTRG_VA_LSHIFT, %g2
495 srlx %g2, TAGTRG_VA_LSHIFT, %g2 ! clear context
4961:
497 ldxa [%g0]ASI_DMMU_TSB_PS1, %g5
498 ! if TSB desc. specifies xor of TSB index, do it here
499 ldda [%g5]ASI_QUAD_LDD, %g6 /* g6 = tag, g7 = data */
500 cmp %g6, %g2
501 bne,pn %xcc, .checkdpermmaps ! tag mismatch
502 nop
503 brgez,pn %g7, .checkdpermmaps ! TTE valid
504 nop
505
506.dtsbhit:
507 ! extract sz from tte
508 TTE_SIZE(%g7, %g4, %g3, .dtsb_inv_pgsz)
509 sub %g4, 1, %g5 ! %g5 page mask
510
511 ! extract ra from tte
512 sllx %g7, 64 - 40, %g3
513 srlx %g3, 64 - 40 + 13, %g3
514 sllx %g3, 13, %g3 ! %g3 real address
515 xor %g7, %g3, %g7 ! %g7 orig tte with ra field zeroed
516 andn %g3, %g5, %g3
517 ldx [%g1 + CPU_GUEST], %g6
518
519
520 ! %g1 cpu struct
521 ! %g2 --
522 ! %g3 raddr
523 ! %g4 page size
524 ! %g5 --
525 ! %g6 guest struct
526 ! %g7 TTE ready for pa
527 RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, 3f, %g5, %g2)
528 mov %g2, %g3 ! %g3 PA
5294:
530 ! %g1 cpu struct
531 ! %g3 paddr
532 ! %g7 TTE ready for pa
533 or %g7, %g3, %g7 ! %g7 new tte with pa
534
535 CLEAR_TTE_LOCK_BIT(%g7, %g5)
536
537 set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection
538 stxa %g7, [%g5]ASI_DTLB_DATA_IN
539 !
540 ! %g1 = CPU pointer
541 ! %g7 = TTE
542 !
543 ldx [%g1 + CPU_MMUSTAT_AREA], %g6
544 brnz,pn %g6, 1f
545 nop
546
547 retry
548
	! MMU statistics enabled: account hit count and %tick delta,
	! bucketed by data side / context class / page size.
5491:
550 rd %tick, %g2
551 ldx [%g1 + CPU_SCR0], %g1
552 sub %g2, %g1, %g5
553 !
554 ! %g5 = %tick delta
555 ! %g6 = MMU statistics area
556 ! %g7 = TTE
557 !
558 inc MMUSTAT_D, %g6 /* stats + d */
559 ldxa [%g0]ASI_DMMU, %g3 /* tag target */
560 srlx %g3, TAGTRG_CTX_RSHIFT, %g4 /* ctx from tag target */
561 mov MMUSTAT_CTX0, %g1
562 movrnz %g4, MMUSTAT_CTXNON0, %g1
563 add %g6, %g1, %g6 /* stats + d + ctx */
564 and %g7, TTE_SZ_MASK, %g7
565 sllx %g7, MMUSTAT_ENTRY_SZ_SHIFT, %g7
566 add %g6, %g7, %g6 /* stats + d + ctx + pgsz */
567 ldx [%g6 + MMUSTAT_TICK], %g3
568 add %g3, %g5, %g3
569 stx %g3, [%g6 + MMUSTAT_TICK]
570 ldx [%g6 + MMUSTAT_HIT], %g3
571 inc %g3
572 stx %g3, [%g6 + MMUSTAT_HIT]
573 retry
574
575
5763:
577 ! %g1 cpu struct
578 ! %g2 --
579 ! %g3 raddr
580 ! %g4 page size
581 ! %g5 --
582 ! %g6 guest struct
583 ! %g7 TTE ready for pa
584 ! check for IO address
585 ! branch back to 4b with pa in %g3
586 ! must preserve %g1 and %g7
587 RANGE_CHECK_IO(%g6, %g3, %g4, .dmmu_miss_io_found,
588 .dmmu_miss_io_not_found, %g2, %g5)
589.dmmu_miss_io_found:
590 ba,a 4b
591 nop
592
593 ! %g1 cpu struct
594 ! %g2 --
595 ! %g3 raddr
596 ! %g4 page size
597 ! %g5 --
598 ! %g6 guest struct
599 ! %g7 TTE ready for pa
600.dmmu_miss_io_not_found:
601 ! Last chance - check the LDC mapin area
602 ldx [ %g6 + GUEST_LDC_MAPIN_BASERA ], %g5
603 subcc %g3, %g5, %g5
604 bneg,pn %xcc, .dtsb_ra_range
605 nop
606 ldx [ %g6 + GUEST_LDC_MAPIN_SIZE ], %g2
607 subcc %g5, %g2, %g0
608 bneg,pt %xcc, ldc_dtsb_hit
609 nop
610
611 /* fall thru */
612
	! Separate entry point: a TSB miss with ctx 0 also checks the
	! guest's permanent mappings below.
613 ENTRY_NP(dtsb_miss)
614
615 ! %g1 = CPU struct
616 ! %g4 = context
617.checkdpermmaps:
618 brnz,pt %g4, .dtsbmiss ! only context zero has perm mappings
619 nop
620 VCPU2GUEST_STRUCT(%g1, %g2)
621 mov GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1), %g3
622 add %g3, GUEST_PERM_MAPPINGS, %g3
623 add %g2, %g3, %g2
624 mov -(GUEST_PERM_MAPPINGS_INCR*(NPERMMAPPINGS-1)), %g3
625
626 mov MMU_TAG_ACCESS, %g4
627 ldxa [%g4]ASI_DMMU, %g4 /* tag access */
628 set (NCTXS - 1), %g5
629 andn %g4, %g5, %g4
630
	! Loop over the perm-mapping array from last entry to first;
	! %g2 points at the last entry, %g3 is a negative offset.
6311:
632 ldda [ %g2 + %g3 ] ASI_QUAD_LDD, %g6 ! Ld TTE (g7) + Tag (g6)
633
634 ! Figure page size match mask
635 ! FIXME: Could speed this by storing the mask ... but
636 ! atomicity problems with storage. Other option is
637 ! store pre-computed page size shift in tag bits 0-13
638 brgez,pn %g7, 2f
639 and %g7, TTE_SZ_MASK, %g5
640 add %g5, %g5, %g1
641 add %g5, %g1, %g1 ! Mult size by 3
642 add %g1, 13, %g1 ! Add 13
643 mov 1, %g5
644 sllx %g5, %g1, %g5 ! Compute bytes per page
645 sub %g5, 1, %g5 ! Page mask for TTE retrieved
646
647 xor %g6, %g4, %g6
648 andncc %g6, %g5, %g0 ! Check for tag match
649
650 beq,pt %xcc, 3f
651 nop
652
6532:
654 brlz,pt %g3, 1b
655 add %g3, GUEST_PERM_MAPPINGS_INCR, %g3
656
657 VCPU_STRUCT(%g1)
658 ba,pt %xcc, .dtsbmiss
659 mov %g0, %g4
660
6613:
662 ! Found a matching entry - can we load it into the DTLB
663 VCPU_STRUCT(%g1)
664 add %g2, %g3, %g2 ! Ptr to map entry
665
666 ! Calculate index into perm bit set
667 ldub [%g1 + CPU_VID], %g3
668 and %g3, MAPPING_XWORD_MASK, %g4
669 mov 1, %g5
670 sllx %g5, %g4, %g4 ! Bit in mask
671 srlx %g3, MAPPING_XWORD_SHIFT, %g3
672 sllx %g3, MAPPING_XWORD_BYTE_SHIFT_BITS, %g3
673 add %g2, %g3, %g2
674
675 ldx [%g2 + MAPPING_DCPUSET], %g3
676 btst %g3, %g4
677 bz,pn %xcc, .dtsbmiss
678 mov %g0, %g4
679
680 ! Stuff entry - it's already been swizzled
681 set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection
682 stxa %g7, [%g5]ASI_DTLB_DATA_IN
683
684 retry
685
686.dtsbmiss:
687 ldx [%g1 + CPU_MMU_AREA], %g2
688 brz,pn %g2, watchdog_guest
689 .empty
690
691 ! %g1 is CPU pointer
692 ! %g2 is MMU Fault Status Area
693 ! %g4 is context (possibly shifted - still OK for zero test)
694 /* if ctx == 0 and ctx0 set TSBs used, take slow trap */
695 /* if ctx != 0 and ctxnon0 set TSBs used, take slow trap */
696 mov CPU_NTSBS_CTXN, %g7
697 movrz %g4, CPU_NTSBS_CTX0, %g7
698 ldx [%g1 + %g7], %g7
699 brnz,pn %g7, .dslowmiss
700 nop
701
702.dfastmiss:
703 /*
704 * Update MMU_FAULT_AREA_DATA
705 */
706 mov MMU_TAG_ACCESS, %g3
707 ldxa [%g3]ASI_DMMU, %g3 /* tag access */
708 set (NCTXS - 1), %g5
709 andn %g3, %g5, %g4
710 and %g3, %g5, %g5
711 stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
712 stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
713 /* fast misses do not update MMU_FAULT_AREA_DFT with MMU_FT_FASTMISS */
714 ! wrpr %g0, TT_FAST_DMMU_MISS, %tt /* already set */
715 rdpr %pstate, %g3
716 or %g3, PSTATE_PRIV, %g3
717 wrpr %g3, %pstate
718 rdpr %tba, %g3
719 add %g3, (TT_FAST_DMMU_MISS << TT_OFFSET_SHIFT), %g3
	! Common redirect tail (shared with .dslowmiss via 7b):
	! %g3 = target pc in the guest's trap table; bump past the
	! TL=0 table when the trap came from TL>0, give up past MAXPTL.
7207:
721 rdpr %tl, %g2
722 cmp %g2, 1 /* trap happened at tl=0 */
723 be,pt %xcc, 1f
724 .empty
725 set TRAPTABLE_SIZE, %g5
726
727 cmp %g2, MAXPTL
728 bgu,pn %xcc, watchdog_guest
729 add %g5, %g3, %g3
730
7311:
732 mov HPSTATE_GUEST, %g5 ! set ENB bit
733 jmp %g3
734 wrhpr %g5, %hpstate
735
736.dslowmiss:
737 /*
738 * Update MMU_FAULT_AREA_DATA
739 */
740 mov MMU_TAG_ACCESS, %g3
741 ldxa [%g3]ASI_DMMU, %g3 /* tag access */
742 set (NCTXS - 1), %g5
743 andn %g3, %g5, %g4
744 and %g3, %g5, %g5
745 stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
746 stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
747 mov MMU_FT_MISS, %g4
748 stx %g4, [%g2 + MMU_FAULT_AREA_DFT]
749 wrpr %g0, TT_DMMU_MISS, %tt
750 rdpr %pstate, %g3
751 or %g3, PSTATE_PRIV, %g3
752 wrpr %g3, %pstate
753 rdpr %tba, %g3
754 add %g3, (TT_DMMU_MISS << TT_OFFSET_SHIFT), %g3
755 ba,a 7b
756 .empty
757
758.dtsb_inv_pgsz:
759 /* DAX with FT=Invalid Page Size (15), VA, CTX */
760 ba,pt %xcc, .dtsb_dax
761 mov MMU_FT_PAGESIZE, %g3
762
763.dtsb_ra_range:
764 /* DAX with FT=Invalid TSB Entry (16), VA, CTX */
765 mov MMU_FT_INVTSBENTRY, %g3
766 /*FALLTHROUGH*/
767
768.dtsb_dax:
769 !! %g1 = cpup
770 ldx [%g1 + CPU_MMU_AREA], %g2
771 brz,pn %g2, watchdog_guest ! Nothing we can do about this
772 nop
773 stx %g3, [%g2 + MMU_FAULT_AREA_DFT]
774 mov MMU_TAG_ACCESS, %g3
775 ldxa [%g3]ASI_DMMU, %g3 /* tag access */
776 set (NCTXS - 1), %g5
777 andn %g3, %g5, %g4
778 and %g3, %g5, %g5
779 stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
780 stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
781 REVECTOR(TT_DAX)
782 SET_SIZE(dmmu_miss)
783
	/*
	 * dmmu_prot - fast DMMU protection trap, redirected to the guest.
	 *
	 * In:	%g2 = guest's MMU fault area pointer (hv phys addr)
	 *
	 * First rules out an outstanding TLB parity error (handled by
	 * ue_err); then records the faulting VA and context in the fault
	 * area and jumps into the guest's trap table at the
	 * TT_FAST_DMMU_PROT vector (offset by TRAPTABLE_SIZE when the
	 * trap came from TL>0), with PSTATE_PRIV raised and HPSTATE_GUEST
	 * set.  watchdog_guest if TL exceeds MAXPTL.
	 */
784 /* %g2 contains guest's miss info pointer (hv phys addr) */
785 ENTRY_NP(dmmu_prot)
786 /*
787 * TLB parity errors can cause normal MMU traps (N1 PRM
788 * section 12.3.3 and 12.3.4). Check here for an outstanding
789 * parity error and have ue_err handle it instead.
790 */
791 ldxa [%g0]ASI_SPARC_ERR_STATUS, %g1 ! SPARC err reg
792 set (SPARC_ESR_DMDU | SPARC_ESR_DMSU), %g3 ! is it a dmdu/dmsu err
793 btst %g3, %g1
794 bnz %xcc, ue_err ! err handler takes care of it
	! (the mov below is the bnz delay slot; harmless if the branch
	! to ue_err is taken)
795 /*
796 * Update MMU_FAULT_AREA_DATA
797 */
798 mov MMU_TAG_ACCESS, %g3
799 ldxa [%g3]ASI_DMMU, %g3 /* tag access */
800 set (NCTXS - 1), %g5
801 andn %g3, %g5, %g4
802 and %g3, %g5, %g5
803 stx %g4, [%g2 + MMU_FAULT_AREA_DADDR]
804 stx %g5, [%g2 + MMU_FAULT_AREA_DCTX]
805 /* fast misses do not update MMU_FAULT_AREA_DFT with MMU_FT_FASTPROT */
806 wrpr %g0, TT_FAST_DMMU_PROT, %tt /* already set? XXXQ */
807 rdpr %pstate, %g3
808 or %g3, PSTATE_PRIV, %g3
809 wrpr %g3, %pstate
810 rdpr %tba, %g3
811 add %g3, (TT_FAST_DMMU_PROT << TT_OFFSET_SHIFT), %g3
812
813 rdpr %tl, %g2
814 cmp %g2, 1 /* trap happened at tl=0 */
815 be,pt %xcc, 1f
816 .empty
817 set TRAPTABLE_SIZE, %g5
818
819 cmp %g2, MAXPTL
820 bgu,pn %xcc, watchdog_guest
821 add %g5, %g3, %g3
822
8231:
824 mov HPSTATE_GUEST, %g5 ! set ENB bit
825 jmp %g3
826 wrhpr %g5, %hpstate
827 SET_SIZE(dmmu_prot)
828
829
830/*
831 * set all TSB base registers to dummy
832 * call sequence:
833 * in:
834 * %g7 return address
835 *
836 * volatile:
837 * %g1
838 */
	/*
	 * set_dummytsb_ctx0 - point all four context-0 TSB base
	 * registers (D/I x PS0/PS1) at the config's dummy TSB and zero
	 * both context-0 TSB config registers.  Returns via %g7 + 4.
	 */
839 ENTRY_NP(set_dummytsb_ctx0)
840 ROOT_STRUCT(%g1)
841 ldx [%g1 + CONFIG_DUMMYTSB], %g1
842
843 stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS0
844 stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS0
845 stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS1
846 stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS1
847
848 stxa %g0, [%g0]ASI_DTSB_CONFIG_CTX0
849 jmp %g7 + 4
850 stxa %g0, [%g0]ASI_ITSB_CONFIG_CTX0 ! delay slot: last store
851 SET_SIZE(set_dummytsb_ctx0)
852
	/*
	 * set_dummytsb_ctxN - same as set_dummytsb_ctx0 but for the
	 * non-zero-context TSB base/config registers.
	 * In: %g7 = return address; volatile: %g1.  Returns via %g7 + 4.
	 */
853 ENTRY_NP(set_dummytsb_ctxN)
854 ROOT_STRUCT(%g1)
855 ldx [%g1 + CONFIG_DUMMYTSB], %g1
856
857 stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS0
858 stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS0
859 stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS1
860 stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS1
861
862 stxa %g0, [%g0]ASI_DTSB_CONFIG_CTXN
863 jmp %g7 + 4
864 stxa %g0, [%g0]ASI_ITSB_CONFIG_CTXN ! delay slot: last store
865 SET_SIZE(set_dummytsb_ctxN)
866
867
	/*
	 * dmmu_err - DMMU error trap handler.
	 *
	 * Rules out a pending TLB parity error (ue_err handles those),
	 * then fills the guest's MMU fault area: DADDR from the SFAR,
	 * DCTX from the tag access register, and - only when the trap is
	 * TT_DAX - a sun4v fault type decoded from the SFSR FT field
	 * (defaulting to MMU_FT_MULTIERR when zero or ambiguous).  The
	 * SFSR is captured and then cleared.  Finally revectors the
	 * original trap type (%tt) to the guest; watchdog_guest if the
	 * guest has no fault area.
	 */
868 ENTRY_NP(dmmu_err)
869 /*
870 * TLB parity errors can cause normal MMU traps (N1 PRM
871 * section 12.3.3 and 12.3.4). Check here for an outstanding
872 * parity error and have ue_err handle it instead.
873 */
874 ldxa [%g0]ASI_SPARC_ERR_STATUS, %g1 ! SPARC err reg
875 set (SPARC_ESR_DMDU | SPARC_ESR_DMSU), %g2 ! is it a dmdu/dmsu err
876 btst %g2, %g1
877 bnz %xcc, ue_err ! err handler takes care of it
878 .empty
879
880 VCPU_STRUCT(%g3)
881 ldx [%g3 + CPU_MMU_AREA], %g3
882 brz,pn %g3, watchdog_guest ! Nothing we can do about this
883 .empty
884 ! %g3 - MMU_FAULT_AREA
885
886 /*
887 * Update MMU_FAULT_AREA_DATA
888 */
889 mov MMU_SFAR, %g4
890 ldxa [%g4]ASI_DMMU, %g4
891 stx %g4, [%g3 + MMU_FAULT_AREA_DADDR]
892 mov MMU_SFSR, %g5
893 ldxa [%g5]ASI_DMMU, %g4 ! Capture SFSR
894 stxa %g0, [%g5]ASI_DMMU ! Clear SFSR
895
896 mov MMU_TAG_ACCESS, %g5
897 ldxa [%g5]ASI_DMMU, %g5
898 set (NCTXS - 1), %g6
899 and %g5, %g6, %g5
900 stx %g5, [%g3 + MMU_FAULT_AREA_DCTX]
901
	! Only a DAX trap gets a decoded fault type; any other %tt is
	! revectored as-is without touching MMU_FAULT_AREA_DFT.
902 rdpr %tt, %g1
903 cmp %g1, TT_DAX
904 bne,pn %xcc, 3f
905 mov MMU_FT_MULTIERR, %g6 ! unknown FT or multiple bits
906
907 ! %g4 - sfsr
908 srlx %g4, MMU_SFSR_FT_SHIFT, %g5
909 andcc %g5, MMU_SFSR_FT_MASK, %g5
910 bz,pn %xcc, 2f
911 nop
912 ! %g5 - fault type
913 ! %g6 - sun4v ft
	! Each andncc/movz pair maps an SFSR FT bit to a sun4v fault
	! type, but only when that bit is the sole bit set; otherwise
	! %g6 keeps MMU_FT_MULTIERR.
914 andncc %g5, MMU_SFSR_FT_PRIV, %g0
915 movz %xcc, MMU_FT_PRIV, %g6 ! priv is only bit set
916 andncc %g5, MMU_SFSR_FT_SO, %g0
917 movz %xcc, MMU_FT_SO, %g6 ! so is only bit set
918 andncc %g5, MMU_SFSR_FT_ATOMICIO, %g0
919 movz %xcc, MMU_FT_NCATOMIC, %g6 ! atomicio is only bit set
920 andncc %g5, MMU_SFSR_FT_ASI, %g0
921 movz %xcc, MMU_FT_BADASI, %g6 ! badasi is only bit set
922 andncc %g5, MMU_SFSR_FT_NFO, %g0
923 movz %xcc, MMU_FT_NFO, %g6 ! nfo is only bit set
924 andncc %g5, (MMU_SFSR_FT_VARANGE | MMU_SFSR_FT_VARANGE2), %g0
925 movz %xcc, MMU_FT_VARANGE, %g6 ! varange are only bits set
9262: stx %g6, [%g3 + MMU_FAULT_AREA_DFT]
9273: REVECTOR(%g1)
928 SET_SIZE(dmmu_err)
929
930
	/*
	 * immu_err - IMMU error trap handler.
	 *
	 * Rules out a pending ITLB parity error (ue_err) and traps taken
	 * from hypervisor context (HTSTATE_HPRIV -> badtrap); then fills
	 * the guest's MMU fault area: IADDR from %tpc, ICTX, and a sun4v
	 * fault type decoded from the captured-and-cleared IMMU SFSR FT
	 * field (MMU_FT_MULTIERR when zero or ambiguous).  Revectors
	 * TT_IAX to the guest; watchdog_guest if no fault area exists.
	 */
931 ENTRY_NP(immu_err)
932 /*
933 * TLB parity errors can cause normal MMU traps (N1 PRM
934 * section 12.3.1. Check here for an outstanding
935 * parity error and have ue_err handle it instead.
936 */
937 ldxa [%g0]ASI_SPARC_ERR_STATUS, %g1 ! SPARC err reg
938 set SPARC_ESR_IMDU, %g2 ! is it a imdu err
939 btst %g2, %g1
940 bnz %xcc, ue_err ! err handler takes care of it
941 rdhpr %htstate, %g1
942 btst HTSTATE_HPRIV, %g1
943 bnz,pn %xcc, badtrap
944 .empty
945
946 VCPU_STRUCT(%g3)
947 ldx [%g3 + CPU_MMU_AREA], %g3
948 brz,pn %g3, watchdog_guest ! Nothing we can do about this
949 nop
950
951 ! %g3 - MMU_FAULT_AREA
952 /* decode sfsr, update MMU_FAULT_AREA_INSTR */
953 rdpr %tpc, %g4
954 stx %g4, [%g3 + MMU_FAULT_AREA_IADDR]
955
956 mov MMU_PCONTEXT, %g5
957 ldxa [%g5]ASI_MMU, %g5
	! NOTE(review): %g2 still holds SPARC_ESR_IMDU (non-zero) from
	! the parity check above and is never reloaded, so this movrnz
	! always forces %g5 to 0 - the comment's "primary ctx for TL=0"
	! case looks unreachable as written; confirm the intended test
	! (presumably on %tl) before relying on ICTX here.
958 movrnz %g2, 0, %g5 ! primary ctx for TL=0, nucleus ctx for TL>0
959 stx %g5, [%g3 + MMU_FAULT_AREA_ICTX]
960
961 ! %g6 - sun4v ft
962 mov MMU_FT_MULTIERR, %g6 ! unknown FT or multiple bits
963
964 mov MMU_SFSR, %g5
965 ldxa [%g5]ASI_IMMU, %g4 ! Capture SFSR
966 stxa %g0, [%g5]ASI_IMMU ! Clear SFSR
967 ! %g4 - sfsr
968 srlx %g4, MMU_SFSR_FT_SHIFT, %g5
969 andcc %g5, MMU_SFSR_FT_MASK, %g5
970 bz,pn %xcc, 1f
971 nop
972 ! %g5 - fault type
	! Map a lone SFSR FT bit to its sun4v fault type; multiple or
	! unrecognized bits leave MMU_FT_MULTIERR in %g6.
973 andncc %g5, MMU_SFSR_FT_PRIV, %g0
974 movz %xcc, MMU_FT_PRIV, %g6 ! priv is only bit set
975 andncc %g5, (MMU_SFSR_FT_VARANGE | MMU_SFSR_FT_VARANGE2), %g0
976 movz %xcc, MMU_FT_VARANGE, %g6 ! varange are only bits set
9771: stx %g6, [%g3 + MMU_FAULT_AREA_IFT]
978 REVECTOR(TT_IAX)
979 SET_SIZE(immu_err)