/*
 * ========== Copyright Header Begin ==========================================
 *
 * Hypervisor Software File: hcall_mmu.s
 *
 * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
 *
 * - Do not alter or remove copyright notices
 *
 * - Redistribution and use of this software in source and binary forms, with
 *   or without modification, are permitted provided that the following
 *   conditions are met:
 *
 * - Redistribution of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistribution in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * Neither the name of Sun Microsystems, Inc. nor the names of contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * This software is provided "AS IS," without a warranty of any kind.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN
 * MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
 * OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
 * FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
 * DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
 * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF
 * SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You acknowledge that this software is not designed, licensed or
 * intended for use in the design, construction, operation or maintenance of
 * any nuclear facility.
 *
 * ========== Copyright Header End ============================================
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
48 | ||
49 | .ident "@(#)hcall_mmu.s 1.99 07/06/20 SMI" | |
50 | ||
51 | #include <sys/asm_linkage.h> | |
52 | #include <asi.h> | |
53 | #include <sun4v/mmu.h> | |
54 | #include <mmu.h> | |
55 | #include <hprivregs.h> | |
56 | #include <guest.h> | |
57 | #include <offsets.h> | |
58 | #include <mmustat.h> | |
59 | #include <util.h> | |
60 | ||
/*
 * mmu_tsb_ctx0
 *
 * arg0 ntsb (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 */
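/*
 * A note on the calling convention used throughout this file:
 * hypervisor-internal helpers such as set_dummytsb_ctx0 and xcopy
 * are invoked with the "ba target; rd %pc, %g7" idiom; the callee
 * returns through %g7 to the instruction just past the delay slot,
 * so execution falls through to the code after the "rd".
 *
 * With that in mind, the flow below is roughly:
 *
 *	cpu->ntsbs = 0;			// fail-safe if we error exit
 *	set_dummytsb_ctx0();		// park H/W bases on the dummy TSB
 *	if (ntsb == 0) return (EOK);
 *	if (ntsb > MAX_NTSB) return (EINVAL);
 *	xcopy(tsbs, cpu->tsbds, ntsb * TSBD_BYTES);
 *	validate each TSBD, then program the H/W TSB registers;
 */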
	ENTRY_NP(hcall_mmu_tsb_ctx0)
	VCPU_GUEST_STRUCT(%g5, %g6)
	/* set cpu->ntsbs to zero now in case we error exit */
	stx %g0, [%g5 + CPU_NTSBS_CTX0]
	/* Also zero out H/W bases */
	ba set_dummytsb_ctx0
	rd %pc, %g7
	brz,pn %o0, setntsbs0
	cmp %o0, MAX_NTSB
	bgu,pn %xcc, herr_inval
	btst TSBD_ALIGNMENT - 1, %o1
	bnz,pn %xcc, herr_badalign
	sllx %o0, TSBD_SHIFT, %g3
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
	! %g1 paddr
	add %g5, CPU_TSBDS_CTX0, %g2
	! xcopy trashes g1-4
	ba xcopy
	rd %pc, %g7
	/* loop over each TSBD and validate */
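	/*
	 * Each TSBD is validated roughly as follows (a summary of the
	 * checks coded below; the ctxnon0 variant performs the same):
	 *
	 *	pgszidx is a valid, supported page-size encoding;
	 *	(1 << pgszidx) is the lowest bit set in pgszmask;
	 *	assoc == 1 (only direct-mapped TSBs are supported);
	 *	size is a power of two in [TSB_SZ0_ENTRIES,
	 *	    TSB_SZ0_ENTRIES << TSB_MAX_SZCODE];
	 *	ctx_index is TSBD_CTX_IDX_SHARE (-1) or 0;
	 *	rsvd == 0;
	 *	base is a size-aligned real address within the guest.
	 */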
	mov %o0, %g1
	add %g5, CPU_TSBDS_CTX0, %g2
1:
	/* check pagesize - accept only valid encodings */
	lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
	cmp %g3, NPGSZ
	bgeu,pn %xcc, herr_badpgsz
	mov 1, %g4
	sll %g4, %g3, %g3
	btst TTE_VALIDSIZEARRAY, %g3
	bz,pn %icc, herr_badpgsz
	nop

	/* check that pageszidx is set in pageszmask */
	lduw [%g2 + TSBD_PGSZS_OFF], %g4
	btst %g3, %g4
	bz,pn %icc, herr_inval

	/* check that pageszidx is lowest-order bit of pageszmask */
	sub %g3, 1, %g3
	btst %g3, %g4
	bnz,pn %icc, herr_inval
	nop

	/* check associativity - only support 1-way */
	lduh [%g2 + TSBD_ASSOC_OFF], %g3
	cmp %g3, 1
	bne,pn %icc, herr_badtsb
	nop
	/* check TSB size */
	ld [%g2 + TSBD_SIZE_OFF], %g3
	sub %g3, 1, %g4
	btst %g3, %g4
	bnz,pn %icc, herr_badtsb
	mov TSB_SZ0_ENTRIES, %g4
	cmp %g3, %g4
	blt,pn %icc, herr_badtsb
	sll %g4, TSB_MAX_SZCODE, %g4
	cmp %g3, %g4
	bgt,pn %icc, herr_badtsb
	nop
	/* check context index field - must be -1 (shared) or zero */
	ld [%g2 + TSBD_CTX_INDEX], %g3
	cmp %g3, TSBD_CTX_IDX_SHARE
	be %icc, 2f ! -1 is OK
	nop
	brnz,pn %g3, herr_inval ! only one set of context regs
	nop
2:
	/* check reserved field - must be zero for now */
	ldx [%g2 + TSBD_RSVD_OFF], %g3
	brnz,pn %g3, herr_inval
	nop
	/* check TSB base real address */
	ldx [%g2 + TSBD_BASE_OFF], %g3
	ld [%g2 + TSBD_SIZE_OFF], %g4
	sllx %g4, TSBE_SHIFT, %g4
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
	! restore %g2
	add %g5, CPU_TSBDS_CTX0, %g2

	/* range OK, check alignment */
	sub %g4, 1, %g4
	btst %g3, %g4
	bnz,pn %xcc, herr_badalign
	sub %g1, 1, %g1
	brnz,pt %g1, 1b
	add %g2, TSBD_BYTES, %g2

	/* now setup H/W TSB regs */
	/* only look at first two TSBDs for now */
	add %g5, CPU_TSBDS_CTX0, %g2
	ldx [%g2 + TSBD_BASE_OFF], %g1
	RA2PA_CONV(%g6, %g1, %g1, %g4)
	ld [%g2 + TSBD_SIZE_OFF], %g4
	srl %g4, TSB_SZ0_SHIFT, %g4
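	/*
	 * The low-order bits of the H/W TSB base registers hold the TSB
	 * size code (entries = TSB_SZ0_ENTRIES << szcode; note the
	 * "increment TSB size field" adds below).  Since the size was
	 * validated above as a power of two in range, the loop simply
	 * counts how far (size >> TSB_SZ0_SHIFT) must be shifted right
	 * before its low bit is set.
	 */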
1:
	btst 1, %g4
	srl %g4, 1, %g4
	bz,a,pt %icc, 1b
	add %g1, 1, %g1 ! increment TSB size field

	stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS0
	stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS0

	lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
	stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 only)
	stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 only)

	/* process second TSBD, if available */
	cmp %o0, 1
	be,pt %xcc, 2f
	add %g2, TSBD_BYTES, %g2 ! move to next TSBD
	ldx [%g2 + TSBD_BASE_OFF], %g1
	RA2PA_CONV(%g6, %g1, %g1, %g4)
	ld [%g2 + TSBD_SIZE_OFF], %g4
	srl %g4, TSB_SZ0_SHIFT, %g4
1:
	btst 1, %g4
	srl %g4, 1, %g4
	bz,a,pt %icc, 1b
	add %g1, 1, %g1 ! increment TSB size field

	stxa %g1, [%g0]ASI_DTSBBASE_CTX0_PS1
	stxa %g1, [%g0]ASI_ITSBBASE_CTX0_PS1

	/* %g3 still has old CONFIG value. */
	lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
	sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
	or %g3, %g7, %g3
	stxa %g3, [%g0]ASI_DTSB_CONFIG_CTX0 ! (PS0 + PS1)
	stxa %g3, [%g0]ASI_ITSB_CONFIG_CTX0 ! (PS0 + PS1)

2:
	stx %o0, [%g5 + CPU_NTSBS_CTX0]
setntsbs0:
	clr %o1 ! no return value
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_tsb_ctx0)


/*
 * mmu_tsb_ctxnon0
 *
 * arg0 ntsb (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 */
	ENTRY_NP(hcall_mmu_tsb_ctxnon0)
	VCPU_GUEST_STRUCT(%g5, %g6)
	/* set cpu->ntsbs to zero now in case we error exit */
	stx %g0, [%g5 + CPU_NTSBS_CTXN]
	/* Also zero out H/W bases */
	ba set_dummytsb_ctxN
	rd %pc, %g7
	brz,pn %o0, setntsbsN
	cmp %o0, MAX_NTSB
	bgu,pn %xcc, herr_inval
	btst TSBD_ALIGNMENT - 1, %o1
	bnz,pn %xcc, herr_badalign
	sllx %o0, TSBD_SHIFT, %g3
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %o1, %g3, herr_noraddr, %g2, %g1)
	! %g1 paddr
	/* xcopy(tsbs, cpu->tsbds, ntsbs*TSBD_BYTES) */
	add %g5, CPU_TSBDS_CTXN, %g2
	! xcopy trashes g1-4
	ba xcopy
	rd %pc, %g7
	/* loop over each TSBD and validate */
	mov %o0, %g1
	add %g5, CPU_TSBDS_CTXN, %g2
1:
	/* check pagesize - accept only valid encodings */
	lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
	cmp %g3, NPGSZ
	bgeu,pn %xcc, herr_badpgsz
	mov 1, %g4
	sll %g4, %g3, %g3
	btst TTE_VALIDSIZEARRAY, %g3
	bz,pn %icc, herr_badpgsz
	nop

	/* check that pageszidx is set in pageszmask */
	lduw [%g2 + TSBD_PGSZS_OFF], %g4
	btst %g3, %g4
	bz,pn %icc, herr_inval

	/* check that pageszidx is lowest-order bit of pageszmask */
	sub %g3, 1, %g3
	btst %g3, %g4
	bnz,pn %icc, herr_inval
	nop

	/* check associativity - only support 1-way */
	lduh [%g2 + TSBD_ASSOC_OFF], %g3
	cmp %g3, 1
	bne,pn %icc, herr_badtsb
	nop
	/* check TSB size */
	ld [%g2 + TSBD_SIZE_OFF], %g3
	sub %g3, 1, %g4
	btst %g3, %g4
	bnz,pn %icc, herr_badtsb
	mov TSB_SZ0_ENTRIES, %g4
	cmp %g3, %g4
	blt,pn %icc, herr_badtsb
	sll %g4, TSB_MAX_SZCODE, %g4
	cmp %g3, %g4
	bgt,pn %icc, herr_badtsb
	nop
	/* check context index field - must be -1 (shared) or zero */
	ld [%g2 + TSBD_CTX_INDEX], %g3
	cmp %g3, TSBD_CTX_IDX_SHARE
	be %icc, 2f ! -1 is OK
	nop
	brnz,pn %g3, herr_inval ! only one set of context regs
	nop
2:
	/* check reserved field - must be zero for now */
	ldx [%g2 + TSBD_RSVD_OFF], %g3
	brnz,pn %g3, herr_inval
	nop
	/* check TSB base real address */
	ldx [%g2 + TSBD_BASE_OFF], %g3
	ld [%g2 + TSBD_SIZE_OFF], %g4
	sllx %g4, TSBE_SHIFT, %g4
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g3, %g4, herr_noraddr, %g7, %g2)
	! restore %g2
	add %g5, CPU_TSBDS_CTXN, %g2
	/* range OK, check alignment */
	sub %g4, 1, %g4
	btst %g3, %g4
	bnz,pn %xcc, herr_badalign
	sub %g1, 1, %g1
	brnz,pt %g1, 1b
	add %g2, TSBD_BYTES, %g2

	/* now setup H/W TSB regs */
	/* only look at first two TSBDs for now */
	add %g5, CPU_TSBDS_CTXN, %g2
	ldx [%g2 + TSBD_BASE_OFF], %g1
	RA2PA_CONV(%g6, %g1, %g1, %g4)
	ld [%g2 + TSBD_SIZE_OFF], %g4
	srl %g4, TSB_SZ0_SHIFT, %g4
1:
	btst 1, %g4
	srl %g4, 1, %g4
	bz,a,pt %icc, 1b
	add %g1, 1, %g1 ! increment TSB size field

	stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS0
	stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS0

	lduh [%g2 + TSBD_IDXPGSZ_OFF], %g3
	stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 only)
	stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 only)

	/* process second TSBD, if available */
	cmp %o0, 1
	be,pt %xcc, 2f
	add %g2, TSBD_BYTES, %g2 ! move to next TSBD
	ldx [%g2 + TSBD_BASE_OFF], %g1
	RA2PA_CONV(%g6, %g1, %g1, %g4)
	ld [%g2 + TSBD_SIZE_OFF], %g4
	srl %g4, TSB_SZ0_SHIFT, %g4
1:
	btst 1, %g4
	srl %g4, 1, %g4
	bz,a,pt %icc, 1b
	add %g1, 1, %g1 ! increment TSB size field

	stxa %g1, [%g0]ASI_DTSBBASE_CTXN_PS1
	stxa %g1, [%g0]ASI_ITSBBASE_CTXN_PS1

	/* %g3 still has old CONFIG value. */
	lduh [%g2 + TSBD_IDXPGSZ_OFF], %g7
	sllx %g7, ASI_TSB_CONFIG_PS1_SHIFT, %g7
	or %g3, %g7, %g3
	stxa %g3, [%g0]ASI_DTSB_CONFIG_CTXN ! (PS0 + PS1)
	stxa %g3, [%g0]ASI_ITSB_CONFIG_CTXN ! (PS0 + PS1)

2:
	stx %o0, [%g5 + CPU_NTSBS_CTXN]
setntsbsN:
	clr %o1 ! no return value
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_tsb_ctxnon0)


/*
 * mmu_tsb_ctx0_info
 *
 * arg0 maxtsbs (%o0)
 * arg1 tsbs (%o1)
 * --
 * ret0 status (%o0)
 * ret1 ntsbs (%o1)
 */
	ENTRY_NP(hcall_mmu_tsb_ctx0_info)
	VCPU_GUEST_STRUCT(%g5, %g6)
	! %g5 cpup
	! %g6 guestp

	! actual ntsbs always returned in %o1, so save tsbs now
	mov %o1, %g4
	! Check to see if ntsbs fits into the supplied buffer
	ldx [%g5 + CPU_NTSBS_CTX0], %o1
	brz,pn %o1, hret_ok
	cmp %o1, %o0
	bgu,pn %xcc, herr_inval
	nop

	btst TSBD_ALIGNMENT - 1, %g4
	bnz,pn %xcc, herr_badalign
	sllx %o1, TSBD_SHIFT, %g3
	! %g3 size of the TSBDs in bytes
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
	! %g2 pa of buffer
	! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
	add %g5, CPU_TSBDS_CTX0, %g1
	! clobbers %g1-%g4
	ba xcopy
	rd %pc, %g7

	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_tsb_ctx0_info)
397 | ||
398 | ||
399 | /* | |
400 | * mmu_tsb_ctxnon0_info | |
401 | * | |
402 | * arg0 maxtsbs (%o0) | |
403 | * arg1 tsbs (%o1) | |
404 | * -- | |
405 | * ret0 status (%o0) | |
406 | * ret1 ntsbs (%o1) | |
407 | */ | |
	ENTRY_NP(hcall_mmu_tsb_ctxnon0_info)
	VCPU_GUEST_STRUCT(%g5, %g6)
	! %g5 cpup
	! %g6 guestp

	! actual ntsbs always returned in %o1, so save tsbs now
	mov %o1, %g4
	! Check to see if ntsbs fits into the supplied buffer
	ldx [%g5 + CPU_NTSBS_CTXN], %o1
	brz,pn %o1, hret_ok
	cmp %o1, %o0
	bgu,pn %xcc, herr_inval
	nop

	btst TSBD_ALIGNMENT - 1, %g4
	bnz,pn %xcc, herr_badalign
	sllx %o1, TSBD_SHIFT, %g3
	! %g3 size of the TSBDs in bytes
	RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g4, %g3, herr_noraddr, %g1, %g2)
	! %g2 pa of buffer
	! xcopy(cpu->tsbds, buffer, ntsbs*TSBD_BYTES)
	add %g5, CPU_TSBDS_CTXN, %g1
	! clobbers %g1-%g4
	ba xcopy
	rd %pc, %g7

	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_tsb_ctxnon0_info)
437 | ||
438 | ||
439 | /* | |
440 | * mmu_map_addr - stuff ttes directly into the tlbs | |
441 | * | |
442 | * arg0 vaddr (%o0) | |
443 | * arg1 ctx (%o1) | |
444 | * arg2 tte (%o2) | |
445 | * arg3 flags (%o3) | |
446 | * -- | |
447 | * ret0 status (%o0) | |
448 | */ | |
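/*
 * A sketch of the flow below: the page size and real address are
 * extracted from the tte, the RA is translated to a PA against the
 * guest's real address ranges (falling back first to the I/O ranges
 * and then to the LDC map-in window), and the rewritten tte is then
 * stuffed into the D- and/or I-TLB through the data-in registers,
 * tagged with the supplied vaddr/ctx.
 */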
	ENTRY_NP(hcall_mmu_map_addr)
#if MAPTR /* { FIXME: */
	PRINT("mmu_map_addr: va=0x")
	PRINTX(%o0)
	PRINT(" ctx=0x")
	PRINTX(%o1)
	PRINT(" tte=0x")
	PRINTX(%o2)
	PRINT(" flags=0x")
	PRINTX(%o3)
	PRINT("\r\n")
1:
#endif /* } */
	VCPU_GUEST_STRUCT(%g1, %g6)

#ifdef STRICT_API
	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o3, herr_inval)
#endif /* STRICT_API */

	! extract sz from tte
	TTE_SIZE(%o2, %g4, %g2, herr_badpgsz)
	sub %g4, 1, %g5 ! %g5 page mask

	! extract ra from tte
	sllx %o2, 64 - 40, %g2
	srlx %g2, 64 - 40 + 13, %g2
	sllx %g2, 13, %g2 ! %g2 real address
	xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed
	andn %g2, %g5, %g2
	/*
	 * FIXME: This eventually needs to also cover the IO
	 * address ranges, and TTE flags as appropriate
	 */
	RA2PA_RANGE_CONV(%g6, %g2, %g4, 3f, %g5, %g7)
	mov %g7, %g2
4:	or %g3, %g2, %g1 ! %g1 new tte with pa

#ifndef STRICT_API
	set (NCTXS - 1), %g3
	and %o1, %g3, %o1
	andn %o0, %g3, %o0
#endif /* STRICT_API */
	or %o0, %o1, %g2 ! %g2 tag
	mov MMU_TAG_ACCESS, %g3 ! %g3 tag_access
	CLEAR_TTE_LOCK_BIT(%g1, %g4)
	set TLB_IN_4V_FORMAT, %g5 ! %g5 sun4v-style tte selection

	btst MAP_DTLB, %o3
	bz 2f
	btst MAP_ITLB, %o3

	stxa %g2, [%g3]ASI_DMMU
	membar #Sync
	stxa %g1, [%g5]ASI_DTLB_DATA_IN
	! condition codes still set
2:	bz 1f
	nop

	stxa %g2, [%g3]ASI_IMMU
	membar #Sync
	stxa %g1, [%g5]ASI_ITLB_DATA_IN

1:	HCALL_RET(EOK)
510 | ||
511 | ! Check for I/O | |
512 | 3: | |
513 | RANGE_CHECK_IO(%g6, %g2, %g4, .hcall_mmu_map_addr_io_found, | |
514 | .hcall_mmu_map_addr_io_not_found, %g1, %g5) | |
515 | .hcall_mmu_map_addr_io_found: | |
516 | ba,a 4b | |
517 | nop | |
518 | .hcall_mmu_map_addr_io_not_found: | |
519 | ! %g1 = cpu struct | |
520 | ! %g2 = real address | |
521 | ! %g3 = TTE without PA/RA field | |
522 | ! %g6 = guest struct | |
523 | ||
524 | ! FIXME: This test to be subsumed when we fix the RA mappings | |
525 | ! for multiple RA blocks | |
526 | ! %g1 guest struct | |
527 | ! %g2 real address | |
528 | ||
529 | set GUEST_LDC_MAPIN_BASERA, %g7 | |
530 | ldx [ %g6 + %g7 ], %g5 | |
531 | subcc %g2, %g5, %g5 | |
532 | bneg,pn %xcc, herr_noraddr | |
533 | nop | |
534 | set GUEST_LDC_MAPIN_SIZE, %g7 | |
535 | ldx [ %g6 + %g7 ], %g7 | |
536 | subcc %g5, %g7, %g0 | |
537 | ! check regs passed in to mapin_ra: | |
538 | bneg,pt %xcc, ldc_map_addr_api | |
539 | nop | |
540 | ||
541 | ENTRY_NP(hcall_mmu_map_addr_ra_not_found) | |
542 | ba,a herr_noraddr | |
543 | nop | |
544 | SET_SIZE(hcall_mmu_map_addr) | |
545 | ||
546 | ||
547 | /* | |
548 | * mmu_unmap_addr | |
549 | * | |
550 | * arg0 vaddr (%o0) | |
551 | * arg1 ctx (%o1) | |
552 | * arg2 flags (%o2) | |
553 | * -- | |
554 | * ret0 status (%o0) | |
555 | */ | |
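/*
 * A note on the demap sequence used here and in the demap_* calls
 * below: the primary context register is temporarily switched to the
 * target context, a store to the I- and/or D-MMU demap ASI performs
 * the demap (with the page-aligned VA as the store address), and the
 * original primary context is then restored.
 */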
	ENTRY_NP(hcall_mmu_unmap_addr)
#if MAPTR /* { FIXME: */
	PRINT("mmu_unmap_addr: va=0x")
	PRINTX(%o0)
	PRINT(" ctx=0x")
	PRINTX(%o1)
	PRINT(" flags=0x")
	PRINTX(%o2)
	PRINT("\r\n")
1:
#endif /* } */
#ifdef STRICT_API
	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o2, herr_inval)
#endif /* STRICT_API */
	mov MMU_PCONTEXT, %g1
	set (NCTXS - 1), %g2 ! 8K page mask
	andn %o0, %g2, %g2
	ldxa [%g1]ASI_MMU, %g3 ! save current primary ctx
	stxa %o1, [%g1]ASI_MMU ! switch to new ctx
	btst MAP_ITLB, %o2
	bz,pn %xcc, 1f
	btst MAP_DTLB, %o2
	stxa %g0, [%g2]ASI_IMMU_DEMAP
1:	bz,pn %xcc, 2f
	nop
	stxa %g0, [%g2]ASI_DMMU_DEMAP
2:	stxa %g3, [%g1]ASI_MMU ! restore original primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_unmap_addr)

587 | ||
588 | /* | |
589 | * mmu_demap_page | |
590 | * | |
591 | * arg0/1 cpulist (%o0/%o1) | |
592 | * arg2 vaddr (%o2) | |
593 | * arg3 ctx (%o3) | |
594 | * arg4 flags (%o4) | |
595 | * -- | |
596 | * ret0 status (%o0) | |
597 | */ | |
	ENTRY_NP(hcall_mmu_demap_page)
	orcc %o0, %o1, %g0
	bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
#ifdef STRICT_API
	nop
	CHECK_VA_CTX(%o2, %o3, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o4, herr_inval)
#endif /* STRICT_API */
	mov MMU_PCONTEXT, %g1
	set (NCTXS - 1), %g2
	andn %o2, %g2, %g2
	ldxa [%g1]ASI_MMU, %g3
	stxa %o3, [%g1]ASI_MMU
	btst MAP_ITLB, %o4
	bz,pn %xcc, 1f
	btst MAP_DTLB, %o4
	stxa %g0, [%g2]ASI_IMMU_DEMAP
1:	bz,pn %xcc, 2f
	nop
	stxa %g0, [%g2]ASI_DMMU_DEMAP
2:	stxa %g3, [%g1]ASI_MMU ! restore primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_demap_page)

622 | ||
623 | /* | |
624 | * mmu_demap_ctx | |
625 | * | |
626 | * arg0/1 cpulist (%o0/%o1) | |
627 | * arg2 ctx (%o2) | |
628 | * arg3 flags (%o3) | |
629 | * -- | |
630 | * ret0 status (%o0) | |
631 | */ | |
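/*
 * For demap-by-context (and demap-all below) the demap type is
 * encoded in the store address itself (TLB_DEMAP_CTX_TYPE /
 * TLB_DEMAP_ALL_TYPE) rather than in a page-aligned VA; the MMU
 * decodes those address bits to select the demap operation.
 */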
	ENTRY_NP(hcall_mmu_demap_ctx)
	orcc %o0, %o1, %g0
	bnz,pn %xcc, herr_notsupported ! cpulist not yet supported
#ifdef STRICT_API
	nop
	CHECK_CTX(%o2, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o3, herr_inval)
#endif /* STRICT_API */
	set TLB_DEMAP_CTX_TYPE, %g3
	mov MMU_PCONTEXT, %g2
	ldxa [%g2]ASI_MMU, %g7
	stxa %o2, [%g2]ASI_MMU
	btst MAP_ITLB, %o3
	bz,pn %xcc, 1f
	btst MAP_DTLB, %o3
	stxa %g0, [%g3]ASI_IMMU_DEMAP
1:	bz,pn %xcc, 2f
	nop
	stxa %g0, [%g3]ASI_DMMU_DEMAP
2:	stxa %g7, [%g2]ASI_MMU ! restore primary ctx
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_demap_ctx)
654 | ||
655 | ||
656 | /* | |
657 | * mmu_demap_all | |
658 | * | |
659 | * arg0/1 cpulist (%o0/%o1) | |
660 | * arg2 flags (%o2) | |
661 | * -- | |
662 | * ret0 status (%o0) | |
663 | */ | |
664 | ENTRY_NP(hcall_mmu_demap_all) | |
665 | orcc %o0, %o1, %g0 | |
666 | bnz,pn %xcc, herr_notsupported ! cpulist not yet supported | |
667 | #ifdef STRICT_API | |
668 | nop | |
669 | CHECK_MMU_FLAGS(%o2, herr_inval) | |
670 | #endif /* STRICT_API */ | |
671 | set TLB_DEMAP_ALL_TYPE, %g3 | |
672 | btst MAP_ITLB, %o2 | |
673 | bz,pn %xcc, 1f | |
674 | btst MAP_DTLB, %o2 | |
675 | stxa %g0, [%g3]ASI_IMMU_DEMAP | |
676 | 1: bz,pn %xcc, 2f | |
677 | nop | |
678 | stxa %g0, [%g3]ASI_DMMU_DEMAP | |
679 | 2: HCALL_RET(EOK) | |
680 | SET_SIZE(hcall_mmu_demap_all) | |
681 | ||
682 | ||
683 | /* | |
684 | * mmu_map_perm_addr | |
685 | * | |
686 | * arg0 vaddr (%o0) | |
687 | * arg1 context (%o1) must be zero | |
688 | * arg2 tte (%o2) | |
689 | * arg3 flags (%o3) | |
690 | * -- | |
691 | * ret0 status (%o0) | |
692 | */ | |
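/*
 * Permanent mappings are recorded in a fixed-size per-guest table
 * (GUEST_PERM_MAPPINGS, NPERMMAPPINGS entries) protected by a spin
 * lock.  Each entry holds a VA tag, the TTE, and per-vcpu I- and
 * D-cpusets recording which virtual cpus hold the mapping in which
 * TLB.  The code below searches the table for a conflicting or free
 * slot, demaps any overlapping mapping, fills the slot in, and sets
 * this vcpu's bit in the relevant cpusets.
 */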
	ENTRY_NP(hcall_mmu_map_perm_addr)
#if MAPTR /* { FIXME: */
	PRINT("mmu_map_perm_addr: va=0x")
	PRINTX(%o0)
	PRINT(" ctx=0x")
	PRINTX(%o1)
	PRINT(" tte=0x")
	PRINTX(%o2)
	PRINT(" flags=0x")
	PRINTX(%o3)
	PRINT("\r\n")
1:
#endif /* } */
706 | ||
707 | brnz,pn %o1, herr_inval | |
708 | nop | |
709 | VCPU_GUEST_STRUCT(%g1, %g6) | |
710 | ||
711 | CHECK_VA_CTX(%o0, %o1, herr_inval, %g2) | |
712 | CHECK_MMU_FLAGS(%o3, herr_inval) | |
713 | ||
714 | ! Fail if tte isn't valid | |
715 | brgez,pn %o2, herr_inval | |
716 | nop | |
717 | ||
718 | ! Fail if flags indicate ITLB, but no execute perm | |
719 | btst MAP_ITLB, %o3 | |
720 | bz,pn %xcc, 1f | |
721 | nop | |
722 | #if 1 /* FIXME: Hack for broken OBP */ | |
723 | or %o2, TTE_X, %o2 | |
724 | #endif | |
725 | btst TTE_X, %o2 | |
726 | bz,pn %xcc, herr_inval | |
727 | nop | |
728 | 1: | |
729 | ! extract sz from tte | |
730 | TTE_SIZE(%o2, %g4, %g2, herr_badpgsz) | |
731 | sub %g4, 1, %g5 ! %g5 page mask | |
732 | ||
733 | ! Fail if page-offset bits aren't zero | |
734 | btst %g5, %o0 | |
735 | bnz,pn %xcc, herr_inval | |
736 | .empty | |
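	! (.empty tells the assembler that the delay slot of the branch
	! above is intentionally left unfilled)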
737 | ||
738 | ! %g1 = cpu struct | |
739 | ! %g4 = page size | |
740 | ! %g5 = page size mask | |
741 | ! %g6 = guest struct | |
742 | ||
743 | ! extract ra from tte | |
744 | sllx %o2, 64 - 56, %g2 | |
745 | srlx %g2, 64 - 56 + 13, %g2 | |
746 | sllx %g2, 13, %g2 ! %g2 real address | |
747 | xor %o2, %g2, %g3 ! %g3 orig tte with ra field zeroed | |
748 | ||
749 | #ifdef STRICT_API | |
750 | andcc %g2, %g5, %g0 | |
751 | bne,pn %xcc, herr_inval ! if RA not page size aligned | |
752 | nop | |
753 | #else | |
754 | andn %g2, %g5, %g2 ! Align RA to page size | |
755 | #endif | |
756 | ||
757 | ! %g1 = cpu struct | |
758 | ! %g2 = real address | |
759 | ! %g3 = TTE with RA field zeroed | |
760 | ! %g4 = page size | |
761 | ! %g5 = page size mask | |
762 | ! %g6 = guest struct | |
763 | ||
764 | RA2PA_RANGE_CONV_UNK_SIZE(%g6, %g2, %g4, herr_noraddr, %g5, %g7) | |
765 | mov %g7, %g2 ! %g2 paddr | |
766 | or %g3, %g2, %o2 | |
767 | ||
768 | ! Force clear TTE lock bit | |
769 | CLEAR_TTE_LOCK_BIT(%o2, %g5) | |
770 | ||
771 | ! %o2 = swizzled tte | |
772 | ! | |
773 | ! %g1 = cpu struct | |
774 | ! %g4 = page size | |
775 | ! %g6 = guest struct | |
776 | ||
777 | sub %g4, 1, %g3 ! page mask | |
778 | ||
779 | add %g6, GUEST_PERM_MAPPINGS_LOCK, %g7 | |
780 | SPINLOCK_ENTER(%g7, %g2, %g5) | |
781 | ||
782 | ! %o2 = swizzled tte | |
783 | ! | |
784 | ! %g1 = cpu struct | |
785 | ! %g3 = page mask | |
786 | ! %g6 = guest struct | |
787 | ! %g7 = spin lock | |
788 | ||
789 | /* Search for existing perm mapping */ | |
790 | add %g6, GUEST_PERM_MAPPINGS, %g1 | |
791 | mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g6 | |
792 | ||
793 | ! %o2 = swizzled tte | |
794 | ! | |
795 | ! %g1 = perm mappings list | |
796 | ! %g3 = page mask | |
797 | ! %g6 = offset | |
798 | ! %g7 = spin lock addr | |
799 | ||
800 | /* | |
801 | * Skim mapping entries for potential conflict. | |
802 | * NOTE: Start at end of array so we prefer to fill | |
803 | * empty slots earlier on in the perm-mapping array. | |
804 | * | |
805 | * for (i=NPERMAPPINGS-1; i>=0 i--) { | |
806 | * if (((addr & table[i].mask)^table[i].tag) & mask) == 0) { | |
807 | * matching entry ... write over it. | |
808 | * } | |
809 | * } | |
810 | */ | |
811 | ||
812 | mov -1, %o1 | |
813 | ||
814 | .perm_map_loop: | |
815 | ldda [ %g1 + %g6 ] ASI_QUAD_LDD, %g4 ! Ld Tag (g4) + TTE (g5) | |
816 | ||
817 | ! Record slot if empty | |
818 | brgez,a,pn %g5, .pml_next_loop | |
819 | mov %g6, %o1 ! del-slot executed if branch taken | |
820 | ||
	and %g5, TTE_SZ_MASK, %g2
	add %g2, %g2, %g7
	add %g2, %g7, %g7 ! Mult by 3
	add %g7, 13, %g7 ! Add 13
	mov 1, %g2
	sllx %g2, %g7, %g2 ! Shift to get bytes of page size
	sub %g2, 1, %g2 ! Page mask for TTE retrieved

	xor %o0, %g4, %g7
	andn %g7, %g2, %g7
	andncc %g7, %g3, %g7 ! Check for tag match

	bne,pt %xcc, .pml_next_loop
	nop

	! Brute force demap both I & D TLBs:
	! FIXME: really only need to do pages ...
	mov MMU_PCONTEXT, %g7
	ldxa [%g7]ASI_MMU, %g2 ! save current primary ctx
	stxa %g0, [%g7]ASI_MMU ! switch to ctx0
	stxa %g0, [%o0]ASI_IMMU_DEMAP
	stxa %g0, [%o0]ASI_DMMU_DEMAP
	stxa %g2, [%g7]ASI_MMU ! restore original primary ctx

	ba,pt %xcc, .pml_match
	mov %g6, %o1

.pml_next_loop:
	sub %g6, MAPPING_SIZE, %g6
	brgez,pt %g6, .perm_map_loop
	nop

.pml_match:
	cmp %o1, -1
	bne,pn %xcc, 2f
	nop

	GUEST_STRUCT(%g1)
	add %g1, GUEST_PERM_MAPPINGS_LOCK, %g6
	SPINLOCK_EXIT(%g6)
	ba,pt %xcc, herr_toomany
	nop
2:

	! %o2 = swizzled tte
	!
	! %g1 = perm mappings list
	! %g3 = page mask
	! %o1 = offset of matching or free entry

	! Fill in the new data.

	add %g1, %o1, %g6
	membar #StoreStore | #LoadStore

	! Now determine the offset and bit that needs setting for this vcpu
	! within the guest.

	VCPU_STRUCT(%g1)

	! %g1 = cpu struct
	! %g3 = page mask
	! %g6 = mapping entry

	/* Calculate this cpu's cpuset mask */
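	! vid maps to (xword, bit): bit = 1 << (vid & MAPPING_XWORD_MASK),
	! byte offset = (vid >> MAPPING_XWORD_SHIFT) <<
	! MAPPING_XWORD_BYTE_SHIFT_BITS; %g2 ends up pointing at this
	! vcpu's xword within the entry's cpuset arrays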
	ldub [%g1 + CPU_VID], %g2
	and %g2, MAPPING_XWORD_MASK, %g3
	mov 1, %g4
	sllx %g4, %g3, %g3
	srlx %g2, MAPPING_XWORD_SHIFT, %g2
	sllx %g2, MAPPING_XWORD_BYTE_SHIFT_BITS, %g2
	add %g6, %g2, %g2 ! xword address within the I/D cpuset arrays

	andcc %o3, MAP_ITLB, %g0
	beq,pn %xcc, .perm_map_testd
	nop
	ldx [ %g2 + MAPPING_ICPUSET ], %g4
	or %g3, %g4, %g4
	stx %g4, [ %g2 + MAPPING_ICPUSET ]

.perm_map_testd:
	andcc %o3, MAP_DTLB, %g0
	beq,pn %xcc, .perm_map_done
	nop
	ldx [ %g2 + MAPPING_DCPUSET ], %g4
	or %g3, %g4, %g4
	stx %g4, [ %g2 + MAPPING_DCPUSET ]

.perm_map_done:

	stx %o0, [ %g6 + MAPPING_VA ]
	stx %o2, [ %g6 + MAPPING_TTE ] ! Finally store the TTE

	membar #StoreStore | #StoreLoad

	VCPU2GUEST_STRUCT(%g1, %g1)

	add %g1, GUEST_PERM_MAPPINGS_LOCK, %g7

	SPINLOCK_EXIT(%g7)
	HCALL_RET(EOK)
	SET_SIZE(hcall_mmu_map_perm_addr)

925 | ||
926 | /* | |
927 | * mmu_unmap_perm_addr | |
928 | * | |
929 | * arg0 vaddr (%o0) | |
930 | * arg1 ctx (%o1) | |
931 | * arg2 flags (%o2) | |
932 | * -- | |
933 | * ret0 status (%o0) | |
934 | */ | |
935 | ||
936 | /* | |
937 | * FIXME: Need to make this a subroutine call so it can | |
938 | * be performed as part of the guest and CPU exit clean up. | |
939 | */ | |
	ENTRY_NP(hcall_mmu_unmap_perm_addr)
	brnz,pn %o1, herr_inval
	nop
	CHECK_VA_CTX(%o0, %o1, herr_inval, %g2)
	CHECK_MMU_FLAGS(%o2, herr_inval)

	GUEST_STRUCT(%g2)

	add %g2, GUEST_PERM_MAPPINGS_LOCK, %g7
	SPINLOCK_ENTER(%g7, %g3, %g5)

	! %g2 = guest struct
	! %g7 = spin lock

	/* Search for existing perm mapping */
	add %g2, GUEST_PERM_MAPPINGS, %g1
	mov ((NPERMMAPPINGS - 1) * MAPPING_SIZE), %g6

	! %g1 = perm mappings list
	! %g2 = guest struct
	! %g6 = offset
	! %g7 = spin lock addr

	/*
	 * Skim mapping entries for a potential match.
	 *
	 *	for (i = NPERMMAPPINGS - 1; i >= 0; i--) {
	 *		if (((addr & table[i].mask) ^ table[i].tag) == 0) {
	 *			matching entry ... invalidate it
	 *		}
	 *	}
	 */

.perm_unmap_loop:
	ldda [ %g1 + %g6 ] ASI_QUAD_LDD, %g4 ! Ld Tag (g4) + TTE (g5)

	! Skip invalid (empty) slots
	brgez,a,pn %g5, .puml_next_loop
	nop

	and %g5, TTE_SZ_MASK, %g5
	add %g5, %g5, %g3
	add %g5, %g3, %g3 ! Multiply by 3
	add %g3, 13, %g3 ! Add 13
	mov 1, %g5
	sllx %g5, %g3, %g3 ! Shift to get bytes of page size
	sub %g3, 1, %g3 ! Page mask for TTE retrieved

	xor %o0, %g4, %g5
	andncc %g5, %g3, %g0 ! Check for tag match

	be,pn %xcc, .puml_match
	nop

.puml_next_loop:
	brgz,pt %g6, .perm_unmap_loop
	sub %g6, MAPPING_SIZE, %g6
997 | ||
998 | ! %g2 = guest structure | |
999 | ! Bail out no match was found | |
1000 | add %g2, GUEST_PERM_MAPPINGS_LOCK, %g7 | |
1001 | SPINLOCK_EXIT(%g7) | |
1002 | ba,pt %xcc, herr_nomap | |
1003 | nop | |
1004 | ||
1005 | .puml_match: | |
1006 | ! %g1 = perm mappings list | |
1007 | ! %g2 = guest struct | |
1008 | ! %g6 = offset of matching entry | |
1009 | ||
1010 | ! NOTE: We assume that the overlap match on insert is good enough that | |
1011 | ! there can never be two or more matching entries in the mapping table | |
1012 | ||
1013 | add %g1, %g6, %g6 | |
1014 | membar #StoreStore | #LoadStore | |
1015 | ||
1016 | ! Now determine the offset and bit that needs setting for this vcpu | |
1017 | ! within the guest. | |
1018 | ||
1019 | VCPU_STRUCT(%g1) | |
1020 | ||
1021 | ! %g1 = cpu struct | |
1022 | ! %g2 = guest struct | |
1023 | ! %g6 = mapping entry | |
1024 | ||
1025 | ||
1026 | ! | |
1027 | ! The remaining logic is as follows: | |
1028 | ! For both the I & D cases determine if we need to clear the | |
1029 | ! presence bits in the active cpusets, and perform a demap on the | |
1030 | ! local CPU (always is the simplest case since the other strands | |
1031 | ! will re-load anyway) | |
1032 | ! | |
1033 | ||
1034 | /* Calculate this cpu's cpuset mask */ | |
1035 | ldub [%g1 + CPU_VID], %g3 | |
1036 | and %g3, MAPPING_XWORD_MASK, %g5 | |
1037 | mov 1, %g4 | |
1038 | sllx %g4, %g5, %g4 | |
1039 | srlx %g3, MAPPING_XWORD_SHIFT, %g5 | |
1040 | sllx %g5, MAPPING_XWORD_BYTE_SHIFT_BITS, %g5 | |
1041 | add %g6, %g5, %g5 ! Just add offset to this for I or d cpuset arrays | |
1042 | ||
1043 | ! %g1 = cpu struct | |
1044 | ! %g2 = guest struct | |
1045 | ! %g3 = CPU vid | |
1046 | ! %g4 = xword bit for vcpu | |
1047 | ! %g5 = cpu xword offset into permmap | |
1048 | ! %g6 = permmap entry | |
1049 | ||
1050 | andcc %o2, MAP_ITLB, %g0 | |
1051 | beq,pn %xcc, .perm_umap_testd | |
1052 | nop | |
1053 | ldx [ %g5 + MAPPING_ICPUSET ], %g7 | |
1054 | andn %g7, %g4, %g7 | |
1055 | stx %g7, [ %g5 + MAPPING_ICPUSET ] | |
1056 | ||
1057 | mov MMU_PCONTEXT, %g7 | |
1058 | ldxa [%g7]ASI_MMU, %o1 ! save current primary ctx | |
1059 | stxa %g0, [%g7]ASI_MMU ! switch to ctx0 | |
1060 | stxa %g0, [%o0]ASI_IMMU_DEMAP | |
1061 | stxa %o1, [%g7]ASI_MMU ! restore original primary ctx | |
1062 | ||
1063 | .perm_umap_testd: | |
1064 | andcc %o2, MAP_DTLB, %g0 | |
1065 | beq,pn %xcc, .perm_umap_finish | |
1066 | nop | |
1067 | ldx [ %g5 + MAPPING_DCPUSET ], %g7 | |
1068 | andn %g7, %g4, %g7 | |
1069 | stx %g7, [ %g5 + MAPPING_DCPUSET ] | |
1070 | ||
1071 | mov MMU_PCONTEXT, %g7 | |
1072 | ldxa [%g7]ASI_MMU, %o1 ! save current primary ctx | |
1073 | stxa %g0, [%g7]ASI_MMU ! switch to ctx0 | |
1074 | stxa %g0, [%o0]ASI_DMMU_DEMAP | |
1075 | stxa %o1, [%g7]ASI_MMU ! restore original primary ctx | |
1076 | ||
1077 | .perm_umap_finish: | |
1078 | ||
1079 | ! %g1 = cpu struct | |
1080 | ! %g2 = guest struct | |
1081 | ! %g6 = permmap entry | |
1082 | ! | |
1083 | ! Final step... if all the CPU set entries are gone | |
1084 | ! then clean out the mapping entry itself | |
1085 | ||
1086 | mov (NVCPU_XWORDS-1)*MAPPING_XWORD_SIZE, %g4 | |
1087 | 1: | |
1088 | add %g4, %g6, %g7 | |
1089 | ldx [ %g7 + MAPPING_ICPUSET ], %g5 | |
1090 | ldx [ %g7 + MAPPING_DCPUSET ], %g7 | |
1091 | orcc %g5, %g7, %g0 | |
1092 | ! Bail out if we find a non-zero entry | |
1093 | bne,pn %xcc, .perm_umap_done | |
1094 | nop | |
1095 | brgz %g4, 1b | |
1096 | sub %g4, MAPPING_XWORD_SIZE, %g4 | |
1097 | ||
1098 | stx %g0, [ %g6 + MAPPING_TTE ] ! Invalidate first | |
1099 | stx %g0, [ %g6 + MAPPING_VA ] ! For sanity cleanse tag | |
1100 | ||
1101 | .perm_umap_done: | |
1102 | membar #StoreStore | #StoreLoad | |
1103 | ||
1104 | VCPU2GUEST_STRUCT(%g1, %g1) | |
1105 | ||
1106 | add %g1, GUEST_PERM_MAPPINGS_LOCK, %g7 | |
1107 | ||
1108 | SPINLOCK_EXIT(%g7) | |
1109 | HCALL_RET(EOK) | |
1110 | SET_SIZE(hcall_mmu_unmap_perm_addr) | |
1111 | ||
1112 | ||
1113 | #ifdef DEBUG | |
1114 | ||
1115 | /* | |
1116 | * mmu_perm_addr_info | |
1117 | * | |
1118 | * arg0 buffer (%o0) | |
1119 | * arg1 nentries (%o1) | |
1120 | * -- | |
1121 | * ret0 status (%o0) | |
1122 | * ret1 nentries (%o1) | |
1123 | */ | |
1124 | ENTRY_NP(hcall_mmu_perm_addr_info) | |
1125 | HCALL_RET(ENOTSUPPORTED) | |
1126 | SET_SIZE(hcall_mmu_perm_addr_info) | |
1127 | ||
1128 | #endif /* DEBUG */ | |
1129 | ||
1130 | ||
1131 | /* | |
1132 | * niagara_mmustat_conf | |
1133 | * | |
1134 | * arg0 mmustat buffer ra (%o0) | |
1135 | * -- | |
1136 | * ret0 status (%o0) | |
1137 | * ret1 old mmustat buffer ra (%o1) | |
1138 | */ | |
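/*
 * Both the guest-visible RA and the hypervisor-usable PA of the
 * buffer are cached in the vcpu struct.  An RA of zero evidently
 * disables statistics collection: the PA is then forced to zero by
 * the "mov 0, %g2" in the annulled branch delay slot below.  The
 * previous RA is handed back in %o1.
 */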
	ENTRY_NP(hcall_niagara_mmustat_conf)
	btst MMUSTAT_AREA_ALIGN - 1, %o0 ! check alignment
	bnz,pn %xcc, herr_badalign
	VCPU_GUEST_STRUCT(%g1, %g4)
	brz,a,pn %o0, 1f
	mov 0, %g2
	RA2PA_RANGE_CONV(%g4, %o0, MMUSTAT_AREA_SIZE, herr_noraddr, %g3, %g2)
1:
	ldx [%g1 + CPU_MMUSTAT_AREA_RA], %o1
	stx %o0, [%g1 + CPU_MMUSTAT_AREA_RA]
	stx %g2, [%g1 + CPU_MMUSTAT_AREA]
	HCALL_RET(EOK)
	SET_SIZE(hcall_niagara_mmustat_conf)


/*
 * niagara_mmustat_info
 *
 * --
 * ret0 status (%o0)
 * ret1 mmustat buffer ra (%o1)
 */
	ENTRY_NP(hcall_niagara_mmustat_info)
	VCPU_STRUCT(%g1)
	ldx [%g1 + CPU_MMUSTAT_AREA_RA], %o1
	HCALL_RET(EOK)
	SET_SIZE(hcall_niagara_mmustat_info)