Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / hypervisor / src / greatlakes / huron / src / hcall_mmu.s
/*
 * ========== Copyright Header Begin ==========================================
 *
 * Hypervisor Software File: hcall_mmu.s
 *
 * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
 *
 * - Do not alter or remove copyright notices
 *
 * - Redistribution and use of this software in source and binary forms, with
 *   or without modification, are permitted provided that the following
 *   conditions are met:
 *
 * - Redistribution of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistribution in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * Neither the name of Sun Microsystems, Inc. nor the names of contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * This software is provided "AS IS," without a warranty of any kind.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
 * MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
 * OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
 * FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
 * DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
 * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
 * SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You acknowledge that this software is not designed, licensed or
 * intended for use in the design, construction, operation or maintenance of
 * any nuclear facility.
 *
 * ========== Copyright Header End ============================================
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

        .ident  "@(#)hcall_mmu.s 1.3 07/07/17 SMI"

        .file   "hcall_mmu.s"

#include <sys/asm_linkage.h>
#include <hypervisor.h>
#include <asi.h>
#include <mmu.h>
#include <sun4v/mmu.h>
#include <hprivregs.h>
#include <offsets.h>
#include <config.h>
#include <guest.h>
#include <util.h>
#include <debug.h>

#define MAPTR 0
/*
 * mmu_tsb_ctx0
 *
 * arg0 ntsb (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 */
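/*
 * Rough C sketch of the flow below (illustrative only; names such as
 * vcpu->tsbds_ctx0 paraphrase the real offsets.h/guest.h layouts):
 *
 *      vcpu->ntsbs_ctx0 = 0;                   // fail-safe for error exits
 *      set_dummytsb_ctx0();
 *      if (ntsb == 0) return (EOK);
 *      if (ntsb > MAX_NTSB) return (EINVAL);
 *      if (tsbs & (TSBD_ALIGNMENT - 1)) return (EBADALIGN);
 *      pa = ra2pa(tsbs, ntsb * TSBD_BYTES);    // else ENORADDR
 *      xcopy(pa, vcpu->tsbds_ctx0, ntsb * TSBD_BYTES);
 *      for (each tsbd) validate(tsbd);         // pagesize, assoc, size, ...
 *      program z_tsb_cfg registers; vcpu->ntsbs_ctx0 = ntsb;
 */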
        ENTRY_NP(hcall_mmu_tsb_ctx0)
        VCPU_GUEST_STRUCT(%g5, %g6)
        /* set cpu->ntsbs to zero now in case we error exit */
        stx     %g0, [%g5 + CPU_NTSBS_CTX0]
        /* Also zero out H/W bases */
        HVCALL(set_dummytsb_ctx0)
        brz,pn  %o0, setntsbs0
        cmp     %o0, MAX_NTSB
        bgu,pn  %xcc, herr_inval
        btst    TSBD_ALIGNMENT - 1, %o1
        bnz,pn  %xcc, herr_badalign
        sllx    %o0, TSBD_SHIFT, %g3
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
        ! %g1 paddr

        /* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
        add     %g5, CPU_TSBDS_CTX0, %g2
        ! xcopy trashes g1-4
        HVCALL(xcopy)
        /* loop over each TSBD and validate */
        mov     %o0, %g1
        add     %g5, CPU_TSBDS_CTX0, %g2
1:
        /* check pagesize - accept only valid encodings */
        lduh    [%g2 + TSBD_IDXPGSZ_OFF], %g3
        cmp     %g3, NPGSZ
        bgeu,pn %xcc, herr_badpgsz
        mov     1, %g4
        sll     %g4, %g3, %g3
        btst    TTE_VALIDSIZEARRAY, %g3
        bz,pn   %icc, herr_badpgsz
        nop

        /* check that pageszidx is set in pageszmask */
        lduw    [%g2 + TSBD_PGSZS_OFF], %g4
        btst    %g3, %g4
        bz,pn   %icc, herr_inval

        /* check that pageszidx is lowest-order bit of pageszmask */
        sub     %g3, 1, %g3
        btst    %g3, %g4
        bnz,pn  %icc, herr_inval
        nop

        /* check associativity - only support 1-way */
        lduh    [%g2 + TSBD_ASSOC_OFF], %g3
        cmp     %g3, 1
        bne,pn  %icc, herr_badtsb
        nop
        /* check TSB size */
        lduw    [%g2 + TSBD_SIZE_OFF], %g3
        sub     %g3, 1, %g4
        btst    %g3, %g4                ! check for power-of-two
        bnz,pn  %xcc, herr_badtsb
        mov     TSB_SZ0_ENTRIES, %g4
        cmp     %g3, %g4
        blu,pn  %xcc, herr_badtsb
        sll     %g4, TSB_MAX_SZCODE, %g4
        cmp     %g3, %g4
        bgu,pn  %xcc, herr_badtsb
        nop
        /* check context index field - must be -1 (shared) or zero/one */
        lduw    [%g2 + TSBD_CTX_INDEX], %g3
        cmp     %g3, TSBD_CTX_IDX_SHARE
        be,pt   %xcc, 2f                ! -1 is OK
        nop
        cmp     %g3, MAX_NCTX_INDEX
        bgu,pn  %xcc, herr_inval
        nop
2:
        /* check reserved field - must be zero for now */
        ldx     [%g2 + TSBD_RSVD_OFF], %g3
        brnz,pn %g3, herr_inval
        nop
        /* check TSB base real address */
        ldx     [%g2 + TSBD_BASE_OFF], %g3
        ld      [%g2 + TSBD_SIZE_OFF], %g4
        sllx    %g4, TSBE_SHIFT, %g4
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
        ! restore %g2
        add     %g5, CPU_TSBDS_CTX0, %g2

        /* range OK, check alignment */
        sub     %g4, 1, %g4
        btst    %g3, %g4
        bnz,pn  %xcc, herr_badalign
        sub     %g1, 1, %g1
        brnz,pt %g1, 1b
        add     %g2, TSBD_BYTES, %g2

        /* now setup HWTW regs */
        ! %g5 = CPU pointer
        clr     %g7
.ctx0_tsbd_loop:
        cmp     %g7, %o0
        bgeu,pn %xcc, .ctx0_tsbd_finish
        nop

        add     %g5, CPU_TSBDS_CTX0, %g2
        sllx    %g7, TSBD_SHIFT, %g1
        add     %g2, %g1, %g2
        ldx     [%g2 + TSBD_BASE_OFF], %g1
        RA2PA_CONV(%g6, %g1, %g1, %g4)  ! start with TSB base PA

        lduw    [%g2 + TSBD_SIZE_OFF], %g4

        dec     %g4
        popc    %g4, %g4
        dec     TSB_SZ0_SHIFT, %g4
        or      %g1, %g4, %g1

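        /*
         * Size-code arithmetic above (worked example; this assumes
         * TSB_SZ0_ENTRIES is 512 with TSB_SZ0_SHIFT its log2, 9):
         * for a power-of-two entry count N, popc(N - 1) == log2(N),
         * so an 8K-entry TSB yields popc(8191) - 9 = 13 - 9 = 4,
         * the szcode OR'd into the low bits of the TSB config word.
         */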
        lduh    [%g2 + TSBD_IDXPGSZ_OFF], %g4
        sll     %g4, TSB_CFG_PGSZ_SHIFT, %g4
        or      %g1, %g4, %g1           ! add page size field
        or      %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
        clr     %g4
        ld      [%g2 + TSBD_CTX_INDEX], %g3
        cmp     %g3, 0                  ! use primary-ctx0 always?
        move    %xcc, USE_TSB_PRIMARY_CTX, %g4
        cmp     %g3, 1                  ! use secondary-ctx0 always?
        move    %xcc, USE_TSB_SECONDARY_CTX, %g4
        sllx    %g4, TSB_CFG_USE_CTX1_SHIFT, %g4
        or      %g1, %g4, %g1           ! add any use-ctx0|ctx1 bits
        mov     1, %g4
        sllx    %g4, 63, %g4
        or      %g1, %g4, %g1           ! add valid bit

        mov     TSB_CFG_CTX0_0, %g4
        cmp     %g7, 1
        move    %xcc, TSB_CFG_CTX0_1, %g4
        cmp     %g7, 2
        move    %xcc, TSB_CFG_CTX0_2, %g4
        cmp     %g7, 3
        move    %xcc, TSB_CFG_CTX0_3, %g4
        stxa    %g1, [%g4]ASI_MMU_TSB

        STRAND_STRUCT(%g2)
        add     %g2, STRAND_MRA, %g2
        mulx    %g7, STRAND_MRA_INCR, %g3 ! save z_tsb_cfg in strand.mra[0->3]
        stx     %g1, [%g2 + %g3]

        ba,pt   %xcc, .ctx0_tsbd_loop
        inc     %g7

.ctx0_tsbd_finish:
        stx     %o0, [%g5 + CPU_NTSBS_CTX0]

setntsbs0:
        clr     %o1                     ! no return value
        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_tsb_ctx0)


/*
 * mmu_tsb_ctxnon0
 *
 * arg0 ntsb (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 */
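/*
 * This path mirrors mmu_tsb_ctx0 above, but operates on the non-zero
 * context descriptors (CPU_TSBDS_CTXN/CPU_NTSBS_CTXN) and programs the
 * nz_tsb_cfg registers; see the C sketch before hcall_mmu_tsb_ctx0.
 */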
        ENTRY_NP(hcall_mmu_tsb_ctxnon0)
        VCPU_GUEST_STRUCT(%g5, %g6)
        /* set cpu->ntsbs to zero now in case we error exit */
        stx     %g0, [%g5 + CPU_NTSBS_CTXN]
        /* Also zero out H/W bases */
        HVCALL(set_dummytsb_ctxN)
        brz,pn  %o0, setntsbsN
        cmp     %o0, MAX_NTSB
        bgu,pn  %xcc, herr_inval
        btst    TSBD_ALIGNMENT - 1, %o1
        bnz,pn  %xcc, herr_badalign
        sllx    %o0, TSBD_SHIFT, %g3
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
        !! %g1 paddr
        add     %g5, CPU_TSBDS_CTXN, %g2
        ! xcopy trashes g1-4
        HVCALL(xcopy)
        /* loop over each TSBD and validate */
        mov     %o0, %g1
        add     %g5, CPU_TSBDS_CTXN, %g2
1:
        /* check pagesize - accept only valid encodings */
        lduh    [%g2 + TSBD_IDXPGSZ_OFF], %g3
        cmp     %g3, NPGSZ
        bgeu,pn %xcc, herr_badpgsz
        mov     1, %g4
        sll     %g4, %g3, %g3
        btst    TTE_VALIDSIZEARRAY, %g3
        bz,pn   %icc, herr_badpgsz
        nop

        /* check that pageszidx is set in pageszmask */
        lduw    [%g2 + TSBD_PGSZS_OFF], %g4
        btst    %g3, %g4
        bz,pn   %icc, herr_inval

        /* check that pageszidx is lowest-order bit of pageszmask */
        sub     %g3, 1, %g3
        btst    %g3, %g4
        bnz,pn  %icc, herr_inval
        nop

        /* check associativity - only support 1-way */
        lduh    [%g2 + TSBD_ASSOC_OFF], %g3
        cmp     %g3, 1
        bne,pn  %icc, herr_badtsb
        nop
        /* check TSB size */
        lduw    [%g2 + TSBD_SIZE_OFF], %g3
        sub     %g3, 1, %g4
        btst    %g3, %g4                ! check for power-of-two
        bnz,pn  %xcc, herr_badtsb
        mov     TSB_SZ0_ENTRIES, %g4
        cmp     %g3, %g4
        blu,pn  %xcc, herr_badtsb
        sll     %g4, TSB_MAX_SZCODE, %g4
        cmp     %g3, %g4
        bgu,pn  %xcc, herr_badtsb
        nop
        /* check context index field - must be -1 (shared) or zero/one */
        lduw    [%g2 + TSBD_CTX_INDEX], %g3
        cmp     %g3, TSBD_CTX_IDX_SHARE
        be,pt   %xcc, 2f                ! -1 is OK
        nop
        cmp     %g3, MAX_NCTX_INDEX
        bgu,pn  %xcc, herr_inval
        nop
2:
        /* check reserved field - must be zero for now */
        ldx     [%g2 + TSBD_RSVD_OFF], %g3
        brnz,pn %g3, herr_inval
        nop
        /* check TSB base real address */
        ldx     [%g2 + TSBD_BASE_OFF], %g3
        ld      [%g2 + TSBD_SIZE_OFF], %g4
        sllx    %g4, TSBE_SHIFT, %g4
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
        ! restore %g2
        add     %g5, CPU_TSBDS_CTXN, %g2

        /* range OK, check alignment */
        sub     %g4, 1, %g4
        btst    %g3, %g4
        bnz,pn  %xcc, herr_badalign
        sub     %g1, 1, %g1
        brnz,pt %g1, 1b
        add     %g2, TSBD_BYTES, %g2

        /* now setup HWTW regs */
        ! %g5 = CPU pointer
        clr     %g7
.ctxn_tsbd_loop:
        cmp     %g7, %o0
        bgeu,pn %xcc, .ctxn_tsbd_finish
        nop

        add     %g5, CPU_TSBDS_CTXN, %g2
        sllx    %g7, TSBD_SHIFT, %g1
        add     %g2, %g1, %g2
        ldx     [%g2 + TSBD_BASE_OFF], %g1
        RA2PA_CONV(%g6, %g1, %g1, %g4)  ! start with TSB base PA

        lduw    [%g2 + TSBD_SIZE_OFF], %g4

        dec     %g4
        popc    %g4, %g4
        dec     TSB_SZ0_SHIFT, %g4
        or      %g1, %g4, %g1

        lduh    [%g2 + TSBD_IDXPGSZ_OFF], %g4
        sll     %g4, TSB_CFG_PGSZ_SHIFT, %g4
        or      %g1, %g4, %g1           ! add page size field
        or      %g1, TSB_CFG_RA_NOT_PA, %g1 ! add RA not PA bit
        clr     %g4
        ld      [%g2 + TSBD_CTX_INDEX], %g3
        cmp     %g3, 0                  ! use primary-ctxnon0 always?
        move    %xcc, USE_TSB_PRIMARY_CTX, %g4
        cmp     %g3, 1                  ! use secondary-ctxnon0 always?
        move    %xcc, USE_TSB_SECONDARY_CTX, %g4
        sllx    %g4, TSB_CFG_USE_CTX1_SHIFT, %g4
        or      %g1, %g4, %g1           ! add any use-ctxnon0|ctx1 bits
        mov     1, %g4
        sllx    %g4, 63, %g4
        or      %g1, %g4, %g1           ! add valid bit

        mov     TSB_CFG_CTXN_0, %g4
        cmp     %g7, 1
        move    %xcc, TSB_CFG_CTXN_1, %g4
        cmp     %g7, 2
        move    %xcc, TSB_CFG_CTXN_2, %g4
        cmp     %g7, 3
        move    %xcc, TSB_CFG_CTXN_3, %g4
        stxa    %g1, [%g4]ASI_MMU_TSB

        STRAND_STRUCT(%g2)
        add     %g2, STRAND_MRA, %g2
        add     %g7, 4, %g3             ! save nz_tsb_cfg in strand.mra[4->7]
        mulx    %g3, STRAND_MRA_INCR, %g3
        stx     %g1, [%g2 + %g3]

        ba,pt   %xcc, .ctxn_tsbd_loop
        inc     %g7

.ctxn_tsbd_finish:
        stx     %o0, [%g5 + CPU_NTSBS_CTXN]

setntsbsN:
        clr     %o1                     ! no return value
        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_tsb_ctxnon0)


/*
 * mmu_tsb_ctx0_info
 *
 * arg0 maxtsbs (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 * ret1 ntsbs (%o1)
 */
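/*
 * C sketch of the info call (illustrative only):
 *
 *      if (vcpu->ntsbs_ctx0 == 0) return (EOK);        // ret1 = 0
 *      if (vcpu->ntsbs_ctx0 > maxtsbs) return (EINVAL);
 *      if (tsbs & (TSBD_ALIGNMENT - 1)) return (EBADALIGN);
 *      pa = ra2pa(tsbs, ntsbs * TSBD_BYTES);           // else ENORADDR
 *      xcopy(vcpu->tsbds_ctx0, pa, ntsbs * TSBD_BYTES);
 *      return (EOK);                                   // ret1 = ntsbs
 */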
        ENTRY_NP(hcall_mmu_tsb_ctx0_info)
        VCPU_GUEST_STRUCT(%g5, %g6)
        ! %g5 cpup
        ! %g6 guestp

        ! actual ntsbs always returned in %o1, so save tsbs now
        mov     %o1, %g4
        ! Check to see if ntsbs fits into the supplied buffer
        ldx     [%g5 + CPU_NTSBS_CTX0], %o1
        brz,pn  %o1, hret_ok
        cmp     %o1, %o0
        bgu,pn  %xcc, herr_inval
        nop

        btst    TSBD_ALIGNMENT - 1, %g4
        bnz,pn  %xcc, herr_badalign
        sllx    %o1, TSBD_SHIFT, %g3
        ! %g3 size of tsbd in bytes
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
        ! %g2 pa of buffer
        ! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
        add     %g5, CPU_TSBDS_CTX0, %g1
        ! clobbers %g1-%g4
        HVCALL(xcopy)

        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_tsb_ctx0_info)


/*
 * mmu_tsb_ctxnon0_info
 *
 * arg0 maxtsbs (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 * ret1 ntsbs (%o1)
 */
        ENTRY_NP(hcall_mmu_tsb_ctxnon0_info)
        VCPU_GUEST_STRUCT(%g5, %g6)
        ! %g5 cpup
        ! %g6 guestp

        ! actual ntsbs always returned in %o1, so save tsbs now
        mov     %o1, %g4
        ! Check to see if ntsbs fits into the supplied buffer
        ldx     [%g5 + CPU_NTSBS_CTXN], %o1
        brz,pn  %o1, hret_ok
        cmp     %o1, %o0
        bgu,pn  %xcc, herr_inval
        nop

        btst    TSBD_ALIGNMENT - 1, %g4
        bnz,pn  %xcc, herr_badalign
        sllx    %o1, TSBD_SHIFT, %g3
        ! %g3 size of tsbd in bytes
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
        ! %g2 pa of buffer
        ! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
        add     %g5, CPU_TSBDS_CTXN, %g1
        ! clobbers %g1-%g4
        HVCALL(xcopy)

        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_tsb_ctxnon0_info)


/*
 * mmu_map_addr - stuff ttes directly into the tlbs
 *
 * arg0 vaddr (%o0)
 * arg1 ctx (%o1)
 * arg2 tte (%o2)
 * arg3 flags (%o3)
 * --
 * ret0 status (%o0)
 */
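/*
 * C sketch (illustrative only). The RA field occupies tte<39:13>; the
 * shift pair below isolates it, and the xor leaves the original tte
 * with the RA bits cleared so the PA can be OR'd back in:
 *
 *      ra  = ((tte << 24) >> 37) << 13;        // tte<39:13>
 *      pte = (tte ^ ra) | ra2pa(ra & ~pgmask); // else check I/O ranges
 *      tag = (va & ~(NCTXS - 1)) | ctx;
 *      if (flags & MAP_DTLB) { write tag_access; dTLB data-in = pte; }
 *      if (flags & MAP_ITLB) { write tag_access; iTLB data-in = pte; }
 */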
        ENTRY_NP(hcall_mmu_map_addr)
        VCPU_GUEST_STRUCT(%g1, %g6)

#ifdef STRICT_API
        CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
        CHECK_MMU_FLAGS(%o3, herr_inval)
#endif /* STRICT_API */

        ! extract sz from tte
        TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
        sub     %g4, 1, %g5             ! %g5 page mask

        ! extract ra from tte
        sllx    %o2, 64 - 40, %g2
        srlx    %g2, 64 - 40 + 13, %g2
        sllx    %g2, 13, %g2            ! %g2 real address
        xor     %o2, %g2, %g3           ! %g3 orig tte with ra field zeroed
        andn    %g2, %g5, %g2
        RA2PA_RANGE_CONV(%g6, %g2, %g4, 3f, %g5, %g7)
        mov     %g7, %g2
        ! %g2 PA
4:      or      %g3, %g2, %g1           ! %g1 new tte with pa

        set     (NCTXS - 1), %g3
        and     %o1, %g3, %o1
        andn    %o0, %g3, %o0
        or      %o0, %o1, %g2           ! %g2 tag
        mov     MMU_TAG_ACCESS, %g3     ! %g3 tag_access

        btst    MAP_DTLB, %o3
        bz      2f
        btst    MAP_ITLB, %o3

        stxa    %g2, [%g3]ASI_DMMU
        membar  #Sync
        stxa    %g1, [%g0]ASI_DTLB_DATA_IN

        ! condition codes still set
2:      bz      1f
        nop

        stxa    %g2, [%g3]ASI_IMMU
        membar  #Sync
        stxa    %g1, [%g0]ASI_ITLB_DATA_IN

1:      HCALL_RET(EOK)

        ! Check for I/O
3:
        RANGE_CHECK_IO(%g6, %g2, %g4, .hcall_mmu_map_addr_io_found,
            .hcall_mmu_map_addr_io_not_found, %g1, %g5)
.hcall_mmu_map_addr_io_found:
        ba,a    4b
.hcall_mmu_map_addr_io_not_found:

        ALTENTRY(hcall_mmu_map_addr_ra_not_found)
        ba,a    herr_noraddr
        nop

        SET_SIZE(hcall_mmu_map_addr)


/*
 * mmu_unmap_addr
 *
 * arg0 vaddr (%o0)
 * arg1 ctx (%o1)
 * arg2 flags (%o2)
 * --
 * ret0 status (%o0)
 */
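/*
 * C sketch (illustrative only). The demap must issue with the target
 * context in MMU_PCONTEXT, so the current primary context registers
 * are saved and restored around it:
 *
 *      saved0 = pctx0; saved1 = pctx1;
 *      pctx0 = ctx;
 *      if (flags & MAP_ITLB) immu_demap_page(va & ~(NCTXS - 1));
 *      if (flags & MAP_DTLB) dmmu_demap_page(va & ~(NCTXS - 1));
 *      pctx0 = saved0; pctx1 = saved1;
 */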
        ENTRY_NP(hcall_mmu_unmap_addr)
#ifdef STRICT_API
        CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
        CHECK_MMU_FLAGS(%o2, herr_inval)
#endif /* STRICT_API */
        mov     MMU_PCONTEXT, %g1
        set     (NCTXS - 1), %g2        ! 8K page mask
        andn    %o0, %g2, %g2
        ldxa    [%g1]ASI_MMU, %g3       ! save current primary ctx
        mov     MMU_PCONTEXT1, %g4
        ldxa    [%g4]ASI_MMU, %g5       ! save current primary ctx1
        stxa    %o1, [%g1]ASI_MMU       ! switch to new ctx
        btst    MAP_ITLB, %o2
        bz,pn   %xcc, 1f
        btst    MAP_DTLB, %o2
        stxa    %g0, [%g2]ASI_IMMU_DEMAP
1:      bz,pn   %xcc, 2f
        nop
        stxa    %g0, [%g2]ASI_DMMU_DEMAP
2:      stxa    %g3, [%g1]ASI_MMU       ! restore original primary ctx
        stxa    %g5, [%g4]ASI_MMU       ! restore original primary ctx1
        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_unmap_addr)


/*
 * mmu_demap_page
 *
 * arg0/1 cpulist (%o0/%o1)
 * arg2 vaddr (%o2)
 * arg3 ctx (%o3)
 * arg4 flags (%o4)
 * --
 * ret0 status (%o0)
 */
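/*
 * Mirrors mmu_unmap_addr above, but with the sun4v cpulist arguments;
 * only a local demap (cpulist == 0) is implemented, anything else
 * returns ENOTSUPPORTED.
 */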
        ENTRY_NP(hcall_mmu_demap_page)
        orcc    %o0, %o1, %g0
        bnz,pn  %xcc, herr_notsupported ! cpulist not yet supported
#ifdef STRICT_API
        nop
        CHECK_VA_CTX(%o2, %o3, herr_inval, %g2)
        CHECK_MMU_FLAGS(%o4, herr_inval)
#endif /* STRICT_API */
        mov     MMU_PCONTEXT, %g1
        set     (NCTXS - 1), %g2
        andn    %o2, %g2, %g2
        ldxa    [%g1]ASI_MMU, %g3
        mov     MMU_PCONTEXT1, %g4
        ldxa    [%g4]ASI_MMU, %g5       ! save primary ctx1
        stxa    %o3, [%g1]ASI_MMU
        btst    MAP_ITLB, %o4
        bz,pn   %xcc, 1f
        btst    MAP_DTLB, %o4
        stxa    %g0, [%g2]ASI_IMMU_DEMAP
1:      bz,pn   %xcc, 2f
        nop
        stxa    %g0, [%g2]ASI_DMMU_DEMAP
2:      stxa    %g3, [%g1]ASI_MMU       ! restore primary ctx
        stxa    %g5, [%g4]ASI_MMU       ! restore primary ctx1
        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_demap_page)


/*
 * mmu_demap_ctx
 *
 * arg0/1 cpulist (%o0/%o1)
 * arg2 ctx (%o2)
 * arg3 flags (%o3)
 * --
 * ret0 status (%o0)
 */
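/*
 * C sketch (illustrative only): demap-context uses the demap-ctx type
 * encoding in the demap address rather than a page-aligned VA:
 *
 *      saved0 = pctx0; saved1 = pctx1;
 *      pctx0 = ctx;
 *      if (flags & MAP_ITLB) immu_demap(TLB_DEMAP_CTX_TYPE);
 *      if (flags & MAP_DTLB) dmmu_demap(TLB_DEMAP_CTX_TYPE);
 *      pctx0 = saved0; pctx1 = saved1;
 */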
        ENTRY_NP(hcall_mmu_demap_ctx)
        orcc    %o0, %o1, %g0
        bnz,pn  %xcc, herr_notsupported ! cpulist not yet supported
#ifdef STRICT_API
        nop
        CHECK_CTX(%o2, herr_inval, %g2)
        CHECK_MMU_FLAGS(%o3, herr_inval)
#endif /* STRICT_API */
        set     TLB_DEMAP_CTX_TYPE, %g3
        mov     MMU_PCONTEXT, %g2
        ldxa    [%g2]ASI_MMU, %g7
        mov     MMU_PCONTEXT1, %g4
        ldxa    [%g4]ASI_MMU, %g6       ! save current primary ctx1
        stxa    %o2, [%g2]ASI_MMU
        btst    MAP_ITLB, %o3
        bz,pn   %xcc, 1f
        btst    MAP_DTLB, %o3
        stxa    %g0, [%g3]ASI_IMMU_DEMAP
1:      bz,pn   %xcc, 2f
        nop
        stxa    %g0, [%g3]ASI_DMMU_DEMAP
2:      stxa    %g7, [%g2]ASI_MMU       ! restore primary ctx
        stxa    %g6, [%g4]ASI_MMU       ! restore primary ctx1
        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_demap_ctx)


/*
 * mmu_demap_all
 *
 * arg0/1 cpulist (%o0/%o1)
 * arg2 flags (%o2)
 * --
 * ret0 status (%o0)
 */
        ENTRY_NP(hcall_mmu_demap_all)
        orcc    %o0, %o1, %g0
        bnz,pn  %xcc, herr_notsupported ! cpulist not yet supported
#ifdef STRICT_API
        nop
        CHECK_MMU_FLAGS(%o2, herr_inval)
#endif /* STRICT_API */
        set     TLB_DEMAP_ALL_TYPE, %g3
        btst    MAP_ITLB, %o2
        bz,pn   %xcc, 1f
        btst    MAP_DTLB, %o2
        stxa    %g0, [%g3]ASI_IMMU_DEMAP
1:      bz,pn   %xcc, 2f
        nop
        stxa    %g0, [%g3]ASI_DMMU_DEMAP
2:      HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_demap_all)

/*
 * mmu_map_perm_addr
 *
 * arg0 vaddr (%o0)
 * arg1 context (%o1) must be zero
 * arg2 tte (%o2)
 * arg3 flags (%o3)
 * --
 * ret0 status (%o0)
 */
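/*
 * High-level C sketch of the whole call (illustrative only; helper
 * names like tte_with_pa() paraphrase the inline steps below):
 *
 *      if (ctx != 0 || bad_va_flags_or_tte()) return (EINVAL);
 *      pte = tte_with_pa(tte);                 // RA -> PA, else ENORADDR
 *      demap_nucleus_ctx();                    // avoid TLB multi-hits
 *      lock(guest->perm_mappings_lock);
 *      m = find_matching_or_free_entry(va);    // NULL -> ETOOMANY
 *      if (m->tte.v && m->tte != pte) err = EINVAL;
 *      else { install in TLBs if first strand on core; update cpusets; }
 *      unlock(guest->perm_mappings_lock);
 */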
        ENTRY_NP(hcall_mmu_map_perm_addr)
        brnz,pn %o1, herr_inval
        VCPU_GUEST_STRUCT(%g1, %g6)

        CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
        CHECK_MMU_FLAGS(%o3, herr_inval)

        ! Fail if tte isn't valid
        brgez,pn %o2, herr_inval
        nop

        ! extract sz from tte
        TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
        sub     %g4, 1, %g5             ! %g5 page mask

        ! Fail if page-offset bits aren't zero
        btst    %g5, %o0
        bnz,pn  %xcc, herr_inval
        .empty

        ! extract ra from tte
        sllx    %o2, 64 - 40, %g2
        srlx    %g2, 64 - 40 + 13, %g2
        sllx    %g2, 13, %g2            ! %g2 real address
        xor     %o2, %g2, %g3           ! %g3 orig tte with ra field zeroed
        andn    %g2, %g5, %g2
        RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g2, %g4, herr_noraddr, %g5, %g7)
        !! %g7 PA
        or      %g3, %g7, %g2           ! %g2 new tte with pa
        !! %g2 = swizzled tte

        /*
         * OBP & Solaris assume demap semantics. Whack the TLBs to remove
         * overlapping (multi-hit trap producing) entries. Note this isn't
         * strictly necessary for incoming 8KB entries as auto-demap would
         * properly handle those.
         */
        set     (TLB_DEMAP_CTX_TYPE | TLB_DEMAP_NUCLEUS), %g1
        stxa    %g0, [%g1]ASI_IMMU_DEMAP
        stxa    %g0, [%g1]ASI_DMMU_DEMAP

        add     %g6, GUEST_PERM_MAPPINGS_LOCK, %g1
        SPINLOCK_ENTER(%g1, %g3, %g4)

        /* Search for existing perm mapping */
        add     %g6, GUEST_PERM_MAPPINGS, %g1
        mov     ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
        mov     0, %g4

        /*
         * Save the first uninitialised or invalid entry (TTE_V == 0)
         * for the permanent mapping. Loop through all entries checking
         * for an existing matching entry.
         *
         * for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
         *      if (!table[i] || !table[i]->tte.v) {
         *              if (saved_entry == 0)
         *                      saved_entry = &table[i]; // free entry
         *              continue;
         *      }
         *      if (table[i]->va == va) {
         *              saved_entry = &table[i]; // matching entry
         *              break;
         *      }
         * }
         */
.pmap_loop:
        ! %g1 = permanent mapping table base address
        ! %g3 = current offset into table
        ! %g4 = last free entry / saved_entry
        add     %g1, %g3, %g5
        ldx     [%g5 + MAPPING_TTE], %g6

        /*
         * if (!tte || !tte.v) {
         *      if (saved_entry == 0) {
         *              // (first invalid/uninitialised entry)
         *              saved_entry = current_entry;
         *      }
         *      continue;
         * }
         */
        brgez,a,pt %g6, .pmap_continue
        movrz   %g4, %g5, %g4

        /*
         * if (m->va == va) {
         *      saved_entry = current_entry;
         *      break;
         * }
         *
         * NB: overlapping mappings not detected, behavior
         * is undefined right now. The hardware will demap
         * when we insert and a TLB error later could reinstall
         * both in some order where the end result is different
         * than the post-map-perm result.
         */
        ldx     [%g5 + MAPPING_VA], %g6
        cmp     %g6, %o0
        be,a,pt %xcc, .pmap_break
        mov     %g5, %g4

.pmap_continue:
        deccc   GUEST_PERM_MAPPINGS_INCR, %g3
        bgeu,pt %xcc, .pmap_loop
        nop

.pmap_break:
        ! %g4 = saved_entry

        /*
         * if (saved_entry == NULL)
         *      return (ETOOMANY);
         */
        brz,a,pn %g4, .pmap_return
        mov     ETOOMANY, %o0

        /*
         * if (saved_entry->tte.v)
         *      existing entry to modify
         * else
         *      free entry to fill in
         */
        ldx     [%g4 + MAPPING_TTE], %g5
        brgez,pn %g5, .pmap_free_entry
        nop

        /*
         * Compare new tte with existing tte
         */
        cmp     %g2, %g5
        bne,a,pn %xcc, .pmap_return
        mov     EINVAL, %o0

.pmap_existing_entry:
        VCPU_STRUCT(%g1)
        ldub    [%g1 + CPU_VID], %g1
        mov     1, %g3
        sllx    %g3, %g1, %g1
        ! %g1 = (1 << CPU->vid)

        /*
         * if (flags & I) {
         *      if (saved_entry->icpuset & (1 << curcpu))
         *              return (EINVAL);
         * }
         */
        btst    MAP_ITLB, %o3
        bz,pn   %xcc, 1f
        nop
        ldx     [%g4 + MAPPING_ICPUSET], %g5
        btst    %g1, %g5
        bnz,a,pn %xcc, .pmap_return
        mov     EINVAL, %o0
1:
        /*
         * if (flags & D) {
         *      if (saved_entry->dcpuset & (1 << curcpu))
         *              return (EINVAL);
         * }
         */
        btst    MAP_DTLB, %o3
        bz,pn   %xcc, 2f
        nop
        ldx     [%g4 + MAPPING_DCPUSET], %g5
        btst    %g1, %g5
        bnz,a,pn %xcc, .pmap_return
        mov     EINVAL, %o0
2:
        ba,pt   %xcc, .pmap_finish
        nop

.pmap_free_entry:
        /*
         * m->va = va;
         * m->tte = tte;
         */
        stx     %o0, [%g4 + MAPPING_VA]
        stx     %g2, [%g4 + MAPPING_TTE]

.pmap_finish:
        VCPU_STRUCT(%g1)
        ldub    [%g1 + CPU_VID], %g3
        mov     1, %g1
        sllx    %g1, %g3, %g1
        ! %g1 = (1 << CPU->vid)
        ! %g3 = vid
        ! %g4 = saved_entry

        /*
         * If no other strands on this core have this mapping then map
         * it in both TLBs.
         *
         * if (((m->icpuset >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0 &&
         *     ((m->dcpuset >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0) {
         *      map in iTLB
         *      map in dTLB
         * }
         */
        ldx     [%g4 + MAPPING_ICPUSET], %g5
        ldx     [%g4 + MAPPING_DCPUSET], %g3
        or      %g5, %g3, %g5
        PCPUID2COREID(%g3, %g6)
        sllx    %g6, CPUID_2_COREID_SHIFT, %g6 ! %g6 * NSTRANDSPERCORE
        srlx    %g5, %g6, %g7
        btst    CORE_MASK, %g7
        bnz,pt  %xcc, 0f
        mov     MMU_TAG_ACCESS, %g3

        stxa    %o0, [%g3]ASI_IMMU
        membar  #Sync
        stxa    %g2, [%g0]ASI_ITLB_DATA_IN
        membar  #Sync
        stxa    %o0, [%g3]ASI_DMMU
        membar  #Sync
        stxa    %g2, [%g0]ASI_DTLB_DATA_IN
        membar  #Sync

0:
        /*
         * if (flags & I) {
         *      m->icpuset |= (1 << CPU->vid);
         * }
         */
        btst    MAP_ITLB, %o3
        bz,pn   %xcc, 3f
        ldx     [%g4 + MAPPING_ICPUSET], %g5

        or      %g5, %g1, %g5
        stx     %g5, [%g4 + MAPPING_ICPUSET]

3:
        /*
         * if (flags & D) {
         *      m->dcpuset |= (1 << CPU->vid);
         * }
         */
        btst    MAP_DTLB, %o3
        bz,pn   %xcc, 4f
        ldx     [%g4 + MAPPING_DCPUSET], %g5

        or      %g5, %g1, %g5
        stx     %g5, [%g4 + MAPPING_DCPUSET]

4:
        mov     EOK, %o0

.pmap_return:
        GUEST_STRUCT(%g1)
        inc     GUEST_PERM_MAPPINGS_LOCK, %g1
        SPINLOCK_EXIT(%g1)
        done
        SET_SIZE(hcall_mmu_map_perm_addr)


/*
 * mmu_unmap_perm_addr
 *
 * arg0 vaddr (%o0)
 * arg1 ctx (%o1)
 * arg2 flags (%o2)
 * --
 * ret0 status (%o0)
 */
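/*
 * C sketch (illustrative only):
 *
 *      lock(guest->perm_mappings_lock);
 *      m = find_valid_entry(va);               // none -> ENOMAP
 *      if (flags & MAP_ITLB) m->icpuset &= ~(1 << vid);
 *      if (flags & MAP_DTLB) m->dcpuset &= ~(1 << vid);
 *      if (no strand on this core still maps it) demap both TLBs;
 *      if (m->icpuset == 0 && m->dcpuset == 0) invalidate entry;
 *      unlock(guest->perm_mappings_lock);
 */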
        ENTRY_NP(hcall_mmu_unmap_perm_addr)
        brnz,pn %o1, herr_inval
        nop
        CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
        CHECK_MMU_FLAGS(%o2, herr_inval)

        /*
         * Search for existing perm mapping
         */
        GUEST_STRUCT(%g6)
        add     %g6, GUEST_PERM_MAPPINGS, %g1
        mov     ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
        mov     0, %g4

        add     %g6, GUEST_PERM_MAPPINGS_LOCK, %g2
        SPINLOCK_ENTER(%g2, %g5, %g6)

        /*
         * for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
         *      if (!table[i]->tte.v)
         *              continue;
         *      if (table[i]->va == va)
         *              break;
         * }
         */
.punmap_loop:
        ! %g1 = permanent mapping table base address
        ! %g3 = current offset into table
        ! %g4 = last free entry / saved_entry
        add     %g1, %g3, %g5
        ldx     [%g5 + MAPPING_TTE], %g6

        /*
         * if (!m->tte.v)
         *      continue;
         */
        brgez,pt %g6, .punmap_continue
        nop

        /*
         * if (m->va == va)
         *      break;
         */
        ldx     [%g5 + MAPPING_VA], %g6
        cmp     %g6, %o0
        be,pt   %xcc, .punmap_break
        nop

.punmap_continue:
        deccc   GUEST_PERM_MAPPINGS_INCR, %g3
        bgeu,pt %xcc, .punmap_loop
        nop

.punmap_break:
        ! %g5 = entry in mapping table

        /*
         * if (i < 0)
         *      return (ENOMAP);
         */
        brlz,a,pn %g3, .punmap_return
        mov     ENOMAP, %o0

        VCPU_STRUCT(%g1)
        ldub    [%g1 + CPU_VID], %g3
        mov     1, %g1
        sllx    %g1, %g3, %g1
        ! %g1 = (1 << CPU->vid)
        ! %g3 = vid
        ! %g5 = entry in mapping table

        /*
         * if (flags & MAP_I) {
         *      m->cpuset_i &= ~(1 << curcpu);
         * }
         */
        btst    MAP_ITLB, %o2
        bz,pn   %xcc, 1f
        nop

        ldx     [%g5 + MAPPING_ICPUSET], %g2
        andn    %g2, %g1, %g2
        stx     %g2, [%g5 + MAPPING_ICPUSET]

1:
        /*
         * if (flags & MAP_D) {
         *      m->cpuset_d &= ~(1 << curcpu);
         * }
         */
        btst    MAP_DTLB, %o2
        bz,pn   %xcc, 2f
        nop

        ldx     [%g5 + MAPPING_DCPUSET], %g2
        andn    %g2, %g1, %g2
        stx     %g2, [%g5 + MAPPING_DCPUSET]

2:
        /*
         * If no other strands on this core still use this mapping
         * then demap it in both TLBs.
         *
         * if (((m->cpuset_i >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0 &&
         *     ((m->cpuset_d >> (CPU2COREID(curcpu) * 8)) & 0xff) == 0) {
         *      demap in iTLB
         *      demap in dTLB
         * }
         */
        ldx     [%g5 + MAPPING_ICPUSET], %g4
        ldx     [%g5 + MAPPING_DCPUSET], %g3
        or      %g4, %g3, %g4
        PCPUID2COREID(%g3, %g6)
        sllx    %g6, CPUID_2_COREID_SHIFT, %g6 ! %g6 * NSTRANDSPERCORE
        srlx    %g4, %g6, %g7
        btst    CORE_MASK, %g7
        bnz,pt  %xcc, 3f
        mov     MMU_PCONTEXT, %g1

        mov     MMU_PCONTEXT1, %g4
        ldxa    [%g1]ASI_MMU, %g3       ! save current primary ctx
        ldxa    [%g4]ASI_MMU, %g6       ! save current primary ctx1
        stxa    %o1, [%g1]ASI_MMU       ! switch to new ctx
        stxa    %g0, [%o0]ASI_IMMU_DEMAP
        stxa    %g0, [%o0]ASI_DMMU_DEMAP
        stxa    %g3, [%g1]ASI_MMU       ! restore original primary ctx
        stxa    %g6, [%g4]ASI_MMU       ! restore original primary ctx1

        /*
         * if (m->cpuset_d == 0 && m->cpuset_i == 0) {
         *      m->va = 0;
         *      m->tte = tte & ~TTE_V;
         * }
         */
        ldx     [%g5 + MAPPING_DCPUSET], %g1
        ldx     [%g5 + MAPPING_ICPUSET], %g2
        orcc    %g1, %g2, %g0
        bnz,pt  %xcc, 3f
        nop

        stx     %g0, [%g5 + MAPPING_VA]
        ! clear TTE_V, bit 63
        ldx     [%g5 + MAPPING_TTE], %g1
        sllx    %g1, 1, %g1
        srlx    %g1, 1, %g1
        stx     %g1, [%g5 + MAPPING_TTE]
3:
        mov     EOK, %o0

.punmap_return:
        GUEST_STRUCT(%g1)
        inc     GUEST_PERM_MAPPINGS_LOCK, %g1
        SPINLOCK_EXIT(%g1)
        done
        SET_SIZE(hcall_mmu_unmap_perm_addr)


#ifdef DEBUG /* { */

/*
 * mmu_perm_addr_info
 *
 * arg0 buffer (%o0)
 * arg1 nentries (%o1)
 * --
 * ret0 status (%o0)
 * ret1 nentries (%o1)
 */
        ENTRY_NP(hcall_mmu_perm_addr_info)
        GUEST_STRUCT(%g7)
        ! %g7 guestp

        ! Check to see if table fits into the supplied buffer
        cmp     %o1, NPERMMAPPINGS
        blu,pn  %xcc, herr_inval
        mov     NPERMMAPPINGS, %o1

        btst    3, %o0
        bnz,pn  %xcc, herr_badalign
        mulx    %o1, PERMMAPINFO_BYTES, %g3
        ! %g3 size of permmap table in bytes
        RA2PA_RANGE_CONV_UNK_SIZE(%g7, %o0, %g3, herr_noraddr, %g5, %g2)
        ! %g2 pa of buffer

        add     %g7, GUEST_PERM_MAPPINGS_LOCK, %g1
        SPINLOCK_ENTER(%g1, %g3, %g4)

        /*
         * Search for valid perm mappings
         */
        add     %g7, GUEST_PERM_MAPPINGS, %g1
        mov     ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g3
        mov     0, %o1
        add     %g1, %g3, %g4
.perm_info_loop:
        ! %o1 = count of valid entries
        ! %g1 = base of mapping table
        ! %g2 = pa of guest's buffer
        ! %g3 = current offset into table
        ! %g4 = current entry in table
        ! %g7 = guestp
        ldx     [%g4 + MAPPING_TTE], %g5
        brgez,pn %g5, .perm_info_continue
        nop

        /* Found a valid mapping */
        ldx     [%g4 + MAPPING_VA], %g5
        stx     %g5, [%g2 + PERMMAPINFO_VA]
        stx     %g0, [%g2 + PERMMAPINFO_CTX]
        ldx     [%g4 + MAPPING_TTE], %g5
        stx     %g5, [%g2 + PERMMAPINFO_TTE]

        VCPU_STRUCT(%g5)
        ldub    [%g5 + CPU_VID], %g5
        mov     1, %o0
        sllx    %o0, %g5, %o0
        ! %o0 = curcpu bit mask
        mov     0, %g6
        ! %g6 = flags
        ldx     [%g4 + MAPPING_ICPUSET], %g5
        btst    %g5, %o0
        bnz,a,pt %xcc, 0f
        or      %g6, MAP_ITLB, %g6
0:      ldx     [%g4 + MAPPING_DCPUSET], %g5
        btst    %g5, %o0
        bnz,a,pt %xcc, 0f
        or      %g6, MAP_DTLB, %g6
0:      stx     %g6, [%g2 + PERMMAPINFO_FLAGS] ! to the guest buffer (%g2), not the table entry

        inc     %o1
        inc     PERMMAPINFO_BYTES, %g2

.perm_info_continue:
        deccc   GUEST_PERM_MAPPINGS_INCR, %g3
        bgeu,pt %xcc, .perm_info_loop
        add     %g1, %g3, %g4

        GUEST_STRUCT(%g1)
        inc     GUEST_PERM_MAPPINGS_LOCK, %g1
        SPINLOCK_EXIT(%g1)

        HCALL_RET(EOK)
        SET_SIZE(hcall_mmu_perm_addr_info)

#endif /* } DEBUG */