Commit | Line | Data |
---|---|---|
920dae64 AT |
1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * Hypervisor Software File: ldc.h | |
5 | * | |
6 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
7 | * | |
8 | * - Do no alter or remove copyright notices | |
9 | * | |
10 | * - Redistribution and use of this software in source and binary forms, with | |
11 | * or without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistribution of source code must retain the above copyright notice, | |
15 | * this list of conditions and the following disclaimer. | |
16 | * | |
17 | * - Redistribution in binary form must reproduce the above copyright notice, | |
18 | * this list of conditions and the following disclaimer in the | |
19 | * documentation and/or other materials provided with the distribution. | |
20 | * | |
21 | * Neither the name of Sun Microsystems, Inc. or the names of contributors | |
22 | * may be used to endorse or promote products derived from this software | |
23 | * without specific prior written permission. | |
24 | * | |
25 | * This software is provided "AS IS," without a warranty of any kind. | |
26 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, | |
27 | * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A | |
28 | * PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN | |
29 | * MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR | |
30 | * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR | |
31 | * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN | |
32 | * OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR | |
33 | * FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE | |
34 | * DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, | |
35 | * ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF | |
36 | * SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. | |
37 | * | |
38 | * You acknowledge that this software is not designed, licensed or | |
39 | * intended for use in the design, construction, operation or maintenance of | |
40 | * any nuclear facility. | |
41 | * | |
42 | * ========== Copyright Header End ============================================ | |
43 | */ | |
44 | /* | |
45 | * Copyright 2007 Sun Microsystems, Inc. All rights reserved. | |
46 | * Use is subject to license terms. | |
47 | */ | |
48 | ||
49 | #ifndef _LDC_H | |
50 | #define _LDC_H | |
51 | ||
52 | #pragma ident "@(#)ldc.h 1.11 07/07/17 SMI" | |
53 | ||
54 | #ifdef __cplusplus | |
55 | extern "C" { | |
56 | #endif | |
57 | ||
58 | #include <guest.h> | |
59 | #include <sun4v/queue.h> | |
60 | #include <platform/ldc.h> | |
61 | ||
/*
 * LDC Endpoint types
 *
 * Identify what kind of entity terminates an LDC channel endpoint.
 */
#define	LDC_GUEST_ENDPOINT	0x0	/* endpoint in a guest domain */
#define	LDC_HV_ENDPOINT		0x1	/* endpoint in the hypervisor */
#define	LDC_SP_ENDPOINT		0x2	/* endpoint in the service processor */

/*
 * A private LDC channel has a service associated with it
 */
#define	LDC_HVCTL_SVC		0x1	/* hypervisor control service */
#define	LDC_CONSOLE_SVC		0x2	/* guest console service */

/*
 * Maximum number of LDC packets to copy in one hcall (to avoid keeping
 * the CPU in HV too long).  Let's say 8K worth of packets:
 */
#define	LDC_MAX_PKT_COPY	((8 * 1024) / Q_EL_SIZE)

/*
 * Size (in number of queue entries) of console TX/RX queues
 */
#define	LDC_CONS_QSIZE		128
85 | ||
/*
 * HV LDC Map Table Entry
 *
 * Each entry is two 64-bit words, word 0 laid out as:
 *
 *  6                             5
 * |3                          6| psz|          13|  11|       4|    0 |
 * +------+-------------------------------+-----+---+----------+------+
 * | rsvd |            rpfn               |  0  | 0 |  perms   | pgsz |  word 0
 * +------+-------------------------------+-----+---+----------+------+
 * |          Hypervisor invalidation cookie slot                     |  word 1
 * +------------------------------------------------------------------+
 */


/* page-size code field occupies the low 4 bits of word 0 */
#define	LDC_MTE_PGSZ_SHIFT	(0)
#define	LDC_MTE_PGSZ_MASK	(0xf)

#define	LDC_MTE_RSVD_BITS	8

/*
 * Permission bit positions in word 0.  Each is the corresponding
 * LDC_MAP_*_BIT number (from platform/ldc.h) offset up by
 * LDC_MTE_PERM_SHIFT so the permission field sits above pgsz.
 */
#define	LDC_MTE_PERM_RD_BIT	(LDC_MAP_R_BIT + LDC_MTE_PERM_SHIFT)
#define	LDC_MTE_PERM_WR_BIT	(LDC_MAP_W_BIT + LDC_MTE_PERM_SHIFT)
#define	LDC_MTE_PERM_EX_BIT	(LDC_MAP_X_BIT + LDC_MTE_PERM_SHIFT)
#define	LDC_MTE_PERM_IORD_BIT	(LDC_MAP_IOR_BIT + LDC_MTE_PERM_SHIFT)
#define	LDC_MTE_PERM_IOWR_BIT	(LDC_MAP_IOW_BIT + LDC_MTE_PERM_SHIFT)
#define	LDC_MTE_PERM_CPRD_BIT	(LDC_MAP_COPY_IN_BIT + LDC_MTE_PERM_SHIFT)
#define	LDC_MTE_PERM_CPWR_BIT	(LDC_MAP_COPY_OUT_BIT + LDC_MTE_PERM_SHIFT)

/* permission field base, and a mask wide enough for bits RD..CPWR */
#define	LDC_MTE_PERM_SHIFT	4
#define	LDC_MTE_PERM_MASK	((1 << (LDC_MTE_PERM_CPWR_BIT - \
				    LDC_MTE_PERM_RD_BIT + 1)) - 1)

/* real page frame number (rpfn) starts at bit 13 of word 0 */
#define	LDC_MTE_RA_SHIFT	13

/* bounds on the number of entries in a guest-registered map table */
#define	LDC_MAX_MAP_TABLE_ENTRIES	(1 << 30)
#define	LDC_MIN_MAP_TABLE_ENTRIES	2

#define	LDC_NUM_MAPINS_BITS	12	/* Modest for now */
#define	LDC_NUM_MAPINS		(1LL << LDC_NUM_MAPINS_BITS)
123 | ||
/*
 * Base RA and total size of the per-guest shared-memory mapin region.
 * NOTE: we should be careful and ensure that we get
 * this in a segment
 */
#define	LDC_MAPIN_BASERA	(0x10LL << 34)
#define	LDC_MAPIN_RASIZE	(LDC_NUM_MAPINS << LARGEST_PG_SIZE_BITS)
130 | ||
131 | ||
/*
 * LDC Cookie address format
 *
 *  6 6                        m+n
 * |3|  0|          |  m|                0|
 * +-+------+----------+-------------------+-------------------+
 * |X|pgszc |   rsvd   |     table_idx     |    page_offset    |
 * +-+------+----------+-------------------+-------------------+
 */
/* pgszc: 3-bit page-size code held in cookie bits 62:60 (bit 63 is X) */
#define	LDC_COOKIE_PGSZC_MASK	0x7
#define	LDC_COOKIE_PGSZC_SHIFT	60
143 | ||
/*
 * For the internal map table entry
 * we assign MMU control bits using the following constants
 * for the MMU_MAP bit-mask - assuming a 64bit word to hold the flags
 */

#define	MIE_VA_MMU_SHIFT	0	/* VA-mapping flags start at bit 0 */
#define	MIE_RA_MMU_SHIFT	8	/* RA-mapping flags start at bit 8 */
#define	MIE_IO_MMU_SHIFT	16	/* IO-mapping flags start at bit 16 */
#define	MIE_CPU_TO_MMU_SHIFT	2	/* 4 strands per MMU */
154 | ||
155 | ||
/*
 * Number of LDC channels terminating in the HV, and between the HV
 * and the SP.
 * NOTE: keep this in sync with Zeus' PRI
 * NOTE(review): an earlier version of this comment claimed "64
 * channels in the HV and 8 between HV and SP", which does not match
 * the values below (1 and 14) -- the values govern; confirm against
 * the PRI.
 */
#define	MAX_HV_LDC_CHANNELS	1
#define	MAX_SP_LDC_CHANNELS	14


/* strand used for IVDR interrupt delivery -- presumably; TODO confirm */
#define	IVDR_THREAD	8
/*
 * Macro to get the mapin table entry from the RA offset
 * ra_offset = (ra - rabase)
 *
 * Computes: entry index = ra_offset >> LARGEST_PG_SIZE_BITS, then
 * mapin_entry = guest_endpt + GUEST_LDC_MAPIN + index * LDC_MAPIN_SIZE.
 *
 * Parameters:
 *	guest_endpt (unmodified)   - guest endpoint struct pointer
 *	ra_offset   (modified)     - RA offset into mapin region
 *	mapin_entry (return value) - addr of mapin entry
 */
/* BEGIN CSTYLED */
#define	GET_MAPIN_ENTRY(guest_endpt, ra_offset, mapin_entry)		\
	srlx	ra_offset, LARGEST_PG_SIZE_BITS, mapin_entry		;\
	mulx	mapin_entry, LDC_MAPIN_SIZE, mapin_entry		;\
	set	GUEST_LDC_MAPIN, ra_offset				;\
	add	ra_offset, guest_endpt, ra_offset			;\
	add	mapin_entry, ra_offset, mapin_entry
/* END CSTYLED */
183 | ||
/*
 * Macro to get the LDC IOMMU PA from the RA
 *
 * Parameters:
 *	guest       (unmodified) - guest endpoint struct pointer
 *	ra          (unmodified) - RA
 *	paddr, scr1 (modified)   - scratch registers
 *	no_ra_lbl   (label)      - branch target taken if ra lies outside
 *				   the guest's mapin region
 *	no_perm_lbl (label)      - branch target taken if the mapin entry
 *				   has neither IO read nor IO write
 *				   permission
 * Results:
 *	paddr - Physical Address
 */
/* BEGIN CSTYLED */
#define	LDC_IOMMU_GET_PA(guest, ra, paddr, scr1, no_ra_lbl, no_perm_lbl) \
	set	GUEST_LDC_MAPIN_BASERA, paddr				;\
	ldx	[guest + paddr], scr1					;\
	subcc	ra, scr1, scr1						;\
	bneg,pn	%xcc, no_ra_lbl						;\
	.empty								;\
	set	GUEST_LDC_MAPIN_SIZE, paddr				;\
	ldx	[guest + paddr], paddr					;\
	cmp	scr1, paddr						;\
	bgu,pt	%xcc, no_ra_lbl						;\
	.empty								;\
	GET_MAPIN_ENTRY(guest, scr1, paddr)				;\
	/* !! paddr	mapin entry addr */				;\
	/* check permissions */						;\
	ldub	[paddr + LDC_MI_PERMS], scr1				;\
	srlx	scr1, LDC_MAP_IOR_BIT, scr1				;\
	andcc	scr1, 0x3, %g0						;\
	beq,pn	%xcc, no_perm_lbl					;\
	.empty								;\
	/* get the PA */						;\
	ldx	[paddr + LDC_MI_PA], paddr
/* END CSTYLED */
217 | ||
218 | ||
219 | #if defined(CONFIG_FPGA) /* { */ | |
/*
 * Macro to copy a packet from an LDC queue into an SRAM LDC queue.
 *
 * Copies Q_EL_SIZE bytes one byte at a time (ldub/stb) -- SRAM accesses
 * must be single-byte; see the sram_ldc_qentry notes below.
 *
 * Inputs:
 *	src  - (modified) - PA of the source
 *	dst  - (modified) - PA of the destination
 *	scr1 - (modified) - Scratch register (byte countdown)
 *	scr2 - (modified) - Scratch register (byte being copied)
 *
 * Outputs:
 *	src - PA of the byte following the source packet just copied
 *	dst - PA of the byte following the destination packet just filled
 *
 * XXX - TODO This is probably where we will want to compute checksum
 * and fill in other SRAM LDC header stuff.
 */
/* BEGIN CSTYLED */
#define	LDC_COPY_PKT_TO_SRAM(src, dst, scr1, scr2)	\
	.pushlocals;					\
	set	Q_EL_SIZE - 1, scr1;			\
0:							\
	ldub	[src], scr2;				\
	stb	scr2, [dst];				\
	inc	src;					\
	dec	scr1;					\
	brgez,pt scr1, 0b;				\
	inc	dst;					\
	.poplocals
/* END CSTYLED */
249 | ||
250 | ||
/*
 * Macro to copy a packet from an SRAM queue into an LDC queue.
 *
 * Copies SRAM_LDC_QENTRY_SIZE bytes one byte at a time (ldub/stb) --
 * SRAM accesses must be single-byte; see the sram_ldc_qentry notes below.
 *
 * Inputs:
 *	src  - (modified) - PA of the source
 *	dst  - (modified) - PA of the destination
 *	scr1 - (modified) - Scratch register (byte countdown)
 *	scr2 - (modified) - Scratch register (byte being copied)
 *
 * Outputs:
 *	src - PA of the byte following the source packet just copied
 *	dst - PA of the byte following the destination packet just filled
 *
 * XXX - TODO This is probably where we will want to verify checksum
 */
/* BEGIN CSTYLED */
#define	LDC_COPY_PKT_FROM_SRAM(src, dst, scr1, scr2)	\
	.pushlocals;					\
	set	SRAM_LDC_QENTRY_SIZE - 1, scr1;		\
0:							\
	ldub	[src], scr2;				\
	stb	scr2, [dst];				\
	inc	src;					\
	dec	scr1;					\
	brgez,pt scr1, 0b;				\
	inc	dst;					\
	.poplocals
/* END CSTYLED */
279 | ||
280 | ||
/*
 * Macro to calculate how many bytes of data are available to be read
 * from a given LDC queue in one pass.
 *
 * "One pass" means a contiguous run: when the data wraps past the end
 * of the queue, only the bytes up to the end (size - head) are counted.
 *
 * Inputs:
 *	head  - (unmodified) - byte offset of the head pointer
 *	tail  - (modified)   - byte offset of the tail pointer
 *	qsize - (unmodified) - size (in bytes) of the queue
 *
 * Output:
 *	tail - Contains the number of bytes of data available.
 */
/* BEGIN CSTYLED */
#define	LDC_QUEUE_DATA_AVAILABLE(head, tail, qsize)			\
	.pushlocals;							\
	brz,a	qsize, 0f;		/* obvious case */		\
	clr	tail;							\
	sub	tail, head, tail;	/* check (tail - head) */	\
	brgez	tail, 0f;		/* If non-negative, then that's */ \
	nop;				/* how many bytes are available */ \
	sub	qsize, head, tail;	/* else (size - head) bytes */	\
0:									\
	.poplocals
/* END CSTYLED */
305 | ||
306 | ||
/*
 * Macro to calculate how many bytes of space are available to be written
 * into a given LDC queue in one pass.
 *
 * The queue is never filled completely: one element is always kept free
 * (so head == tail can unambiguously mean "empty"), hence the
 * element_size adjustments below.
 *
 * Inputs:
 *	head  - (modified)   - byte offset of the head pointer
 *	tail  - (unmodified) - byte offset of the tail pointer
 *	qsize - (modified)   - size (in bytes) of the queue
 *	element_size - (constant) - size (in bytes) of one queue element
 *
 * Output:
 *	head - Amount of space (in bytes) available in the queue.
 */
/* BEGIN CSTYLED */
#define	LDC_QUEUE_SPACE_AVAILABLE(head, tail, qsize, element_size)	\
	.pushlocals;							\
	brz,a	qsize, 1f;	/* no space available if qsize is 0 */	\
	clr	head;							\
	brz,a,pn head, 0f;	/* cannot fill queue completely so.. */	\
	sub	qsize, element_size, qsize;	/* adjust qsize if.. */	\
0:				/* head is zero */			\
	sub	head, tail, head;	/* space = (head - tail) ... */	\
	sub	head, element_size, head;	/* minus 1 element */	\
	brlz,a	head, 1f;	/* If negative value, then */		\
	sub	qsize, tail, head;	/* space = (size - tail) */	\
1:									\
	.poplocals
/* END CSTYLED */
335 | ||
336 | ||
/*
 * For LDC, we use the FPGA interrupt status bits for LDC specific
 * purposes that don't quite map well to their original names in the
 * context of service mailboxes.
 */
#define	SP_LDC_SPACE		QINTR_ACK
#define	SP_LDC_DATA		QINTR_BUSY
#define	SP_LDC_STATE_CHG	QINTR_NACK
345 | ||
/*
 * Send the SP an interrupt on the LDC IN channel.
 * NOTE(review): no macro follows this comment -- it appears stale;
 * confirm against the assembly files that include this header.
 */
/* BEGIN CSTYLED */
/*
 * Assume for now, that we are not adding any header information
 * to the LDC packets as they go through the SRAM. This assumption
 * will break if the sram_ldc_qentry struct changes.
 */
#define	LDC_SRAM_Q_EL_SIZE_SHIFT	Q_EL_SIZE_SHIFT

/*
 * Conversions between queue-entry index and byte offset, for both
 * SRAM LDC queues and regular LDC queues.
 * NOTE: If SRAM_LDC_QENTRY_SIZE remains a power of 2, then we can
 * use shifts. Otherwise, we will have to use mulx/udivx.
 */
#define	LDC_SRAM_IDX_TO_OFFSET(idx)	\
	sllx	idx, LDC_SRAM_Q_EL_SIZE_SHIFT, idx

#define	LDC_SRAM_OFFSET_TO_IDX(offset)	\
	srlx	offset, LDC_SRAM_Q_EL_SIZE_SHIFT, offset

#define	LDC_IDX_TO_OFFSET(idx)	\
	sllx	idx, Q_EL_SIZE_SHIFT, idx

#define	LDC_OFFSET_TO_IDX(offset)	\
	srlx	offset, Q_EL_SIZE_SHIFT, offset
372 | ||
373 | ||
374 | /* | |
375 | * following must be defined (identically) on both sides of the _ASM boundary | |
376 | */ | |
377 | #endif /* } CONFIG_FPGA */ | |
378 | ||
/* FIXME: see comment below about structures used by CONFIG_FPGA */
#if 1 || defined(CONFIG_FPGA)
/* number of packet entries in each SRAM LDC queue */
#define	SRAM_LDC_ENTRIES_PER_QUEUE	4
#endif
383 | ||
384 | #ifndef _ASM | |
385 | ||
/*
 * Each LDC endpoint has a Tx and Rx interrupt associated
 * with it.  The mapreg structure stores the INO, the target
 * CPU and guest specified cookie for the interrupt.
 * It also stores info on whether the interrupt is valid,
 * along with its current state.
 *
 * There is a back pointer to the endpoint the interrupt
 * is associated with.
 */
typedef struct ldc_mapreg ldc_mapreg_t;

struct ldc_mapreg {
	uint32_t	state;		/* interrupt state */
	uint8_t		valid;		/* valid ? */

	uint64_t	ino;		/* devino -- from MD */
	uint64_t	pcpup;		/* tgt cpu to which notif sent */
	uint64_t	cookie;		/* intr cookie sent to the tgt cpu */

	uint64_t	endpoint;	/* endpoint to which this belongs */
};
408 | ||
/*
 * An LDC endpoint within a guest or hypervisor.
 * Don't need a global LDC structure, since Zeus maintains the
 * global information.
 */

typedef struct ldc_endpoint ldc_endpoint_t;
typedef struct ldce_parse ldce_parse_t;

/*
 * Per-channel description -- presumably filled in while parsing the
 * machine description; verify against the code that populates it.
 */
struct ldce_parse {
	resource_t	res;		/* resource bookkeeping */
	uint8_t		svc_id;		/* service id (for private channels) */
	uint8_t		is_private;	/* non-zero for a private svc channel */
	uint8_t		target_type;	/* guest, HV, or SP endpoint type */
	uint16_t	target_channel;	/* channel number at the other end */
	uint16_t	channel;	/* local channel number */
	uint16_t	tx_ino;		/* devino for the Tx interrupt */
	uint16_t	rx_ino;		/* devino for the Rx interrupt */
	struct guest	*target_guestp;	/* guest at the other end */
};
429 | ||
struct ldc_endpoint {
	/* Note: channel_idx must be first element in endpoint struct */
	uint8_t		channel_idx;	/* channel index */
	uint8_t		is_live;	/* is non-zero if LDC channel is open */
	uint8_t		is_private;	/* private svc guest endpoint */
	uint8_t		svc_id;		/* service id (LDC_*_SVC) if private */
	uint8_t		rx_updated;	/* flag: Rx queue was updated */

	uint8_t		txq_full;	/* flag used for TX notifications */

	/* transmit queue: guest RA, translated PA, size, head/tail */
	uint64_t	tx_qbase_ra;
	uint64_t	tx_qbase_pa;
	uint64_t	tx_qsize;
	uint32_t	tx_qhead;
	uint32_t	tx_qtail;

	uint64_t	tx_cb;		/* Tx callback */
	uint64_t	tx_cbarg;	/* Tx callback arg */

	struct ldc_mapreg tx_mapreg;	/* Tx interrupt mapreg state */

	/* receive queue: guest RA, translated PA, size, head/tail */
	uint64_t	rx_qbase_ra;
	uint64_t	rx_qbase_pa;
	uint64_t	rx_qsize;
	uint32_t	rx_qhead;
	uint32_t	rx_qtail;

	uint64_t	rx_cb;		/* Rx callback */
	uint64_t	rx_cbarg;	/* Rx callback arg */

	struct ldc_mapreg rx_mapreg;	/* Rx interrupt mapreg state */

	vdev_mapreg_t	*rx_vintr_cookie; /* vdev intr cookie for Rx */

	/*
	 * The other end point for sending to
	 * Must be in another guest .. and another cpu
	 * ! Zeus takes care of this
	 */
	uint8_t		target_type;	/* guest, HV, or SP */
	struct guest	*target_guest;
	uint64_t	target_channel;

	uint64_t	map_table_ra;	/* RA of assigned map table */
	uint64_t	map_table_pa;	/* PA of assigned map table */
	uint64_t	map_table_nentries; /* Map table entries */
	uint64_t	map_table_sz;	/* Size of map table */

	ldce_parse_t	pip;		/* parse-time channel description */
};
480 | ||
/*
 * LDC devino to endpoint mapping
 *
 * For each device INO the corresponding channel endpoint
 * is kept in a lookup table for fast access. This allows
 * all interrupt mgmt calls to obtain the corresponding
 * endpoint quickly.
 */
struct ldc_ino2endpoint {
	void	*endpointp;	/* channel endpoint for this INO */
	void	*mapregp;	/* associated interrupt mapreg */
};


extern ldc_endpoint_t hv_ldcs[];		/* HV endpoint table */
extern struct guest_console_queues cons_queues[];
497 | ||
/*
 * LDC shared memory mapin entry
 */

struct ldc_mapin {
	uint64_t	pa;		/* PA of the mapped-in page */
#define	ldc_mapin_next_idx	pa	/* use as next_idx field when free */

	uint64_t	mmu_map;	/* MMU_MAP flag word (see MIE_*_SHIFT) */

	uint64_t	io_va;		/* IO virtual address */
	uint64_t	va;		/* virtual address of the mapping */
	uint16_t	va_ctx;		/* MMU context for va */

	uint16_t	local_endpoint;	/* channel this mapin belongs to */

	uint8_t		pg_size;	/* page-size code */
	uint8_t		perms;		/* LDC_MAP_* permission bits */

	uint32_t	map_table_idx;	/* index into the exporter's map table */
};
519 | ||
520 | ||
/*
 * Space for guest to guest console queues is allocated in HV memory.
 * Each queue holds LDC_CONS_QSIZE entries of Q_EL_SIZE bytes.
 */
struct guest_console_queues {
	uint8_t	cons_rxq[Q_EL_SIZE * LDC_CONS_QSIZE];	/* console Rx queue */
	uint8_t	cons_txq[Q_EL_SIZE * LDC_CONS_QSIZE];	/* console Tx queue */
};
528 | ||
/*
 * XXX - For the moment the offsets generation
 * can't handle the fact that we may not define these
 * structures in all cases, so we have to have these defines
 * enabled even if CONFIG_FPGA is not defined (sigh)
 */

#if 1 || defined(CONFIG_FPGA) /* { */

/*
 * These structures describe the Queue/header structure as laid out in SRAM
 * for use by both Hypervisor and VBSC.
 * Obviously this represents an interface of sorts between HV and VBSC and
 * thus we have to keep these structure definitions in sync between HV and
 * VBSC.
 *
 * N.B. All accesses to the SRAM must be single byte (uint8_t) so that the
 * read or write operation is atomic. Thus the fields in our SRAM struct
 * are all uint8_t. This means we have to store a head/tail index rather
 * than offset so that it fits within a single byte.
 * (The packet payload below is declared as uint64_t words, but the copy
 * macros above still access it bytewise.)
 */

struct sram_ldc_qentry {
	uint64_t	pkt_data[8];	/* 64 byte packets */
};
554 | ||
typedef struct sram_ldc_qd sram_ldc_qd_t;

/*
 * If CONFIG_SPLIT_SRAM is defined, the queue data is stored separately
 * from the queue descriptor data.
 */
#ifdef CONFIG_SPLIT_SRAM
typedef struct sram_ldc_q_data sram_ldc_q_data_t;
#endif

/*
 * SRAM queue descriptor.  Control fields are single bytes so each SRAM
 * access is atomic (see N.B. above).
 */
struct sram_ldc_qd {
#ifndef CONFIG_SPLIT_SRAM
	struct sram_ldc_qentry ldc_queue[SRAM_LDC_ENTRIES_PER_QUEUE];
#endif
	uint8_t	head;		/* head index for queue */
	uint8_t	tail;		/* tail index for queue */
	uint8_t	state;		/* link UP (1) or DOWN (0) */
	uint8_t	state_updated;	/* flag indicating link has been reset */
	uint8_t	state_notify;	/* flag indicating reset notification */
#ifndef CONFIG_SPLIT_SRAM
	uint8_t	padding[59];	/* reserve some space for future */
#endif
};

#ifdef CONFIG_SPLIT_SRAM
/* queue packet storage, kept separate from the descriptor */
struct sram_ldc_q_data {
	struct sram_ldc_qentry ldc_queue[SRAM_LDC_ENTRIES_PER_QUEUE];
};
#endif
584 | ||
/*
 * SP LDC Endpoint
 *
 * Endpoint for a channel between the HV and the service processor,
 * carried over the SRAM queues above.  The tx_scr_* / rx_scr_*
 * members are per-endpoint scratch storage, usable only while the
 * corresponding tx_lock / rx_lock is held.
 */
typedef struct sp_ldc_endpoint sp_ldc_endpoint_t;

struct sp_ldc_endpoint {
	/* Note: channel_idx must be first element in endpoint struct */
	uint8_t		channel_idx;	/* channel index */
	uint8_t		is_live;	/* is non-zero if LDC channel is open */
	uint8_t		target_type;	/* guest or HV */

	sram_ldc_qd_t	*tx_qd_pa;	/* PA of Tx queue descriptor in SRAM */
	sram_ldc_qd_t	*rx_qd_pa;	/* PA of Rx queue descriptor in SRAM */
#ifdef CONFIG_SPLIT_SRAM
	sram_ldc_q_data_t *tx_q_data_pa; /* PA of Tx packet storage */
	sram_ldc_q_data_t *rx_q_data_pa; /* PA of Rx packet storage */
#endif

	struct guest	*target_guest;	/* The guest at the other endpoint */
	uint64_t	target_channel;	/* Channel num of the other endpt */
	uint64_t	tx_lock;	/* synchronize access to endpoints */
	uint64_t	rx_lock;	/* synchronize access to endpoints */

	uint32_t	tx_scr_txhead;	/* use freely if you own tx_lock */
	uint32_t	tx_scr_txtail;	/* use freely if you own tx_lock */
	uint64_t	tx_scr_txsize;	/* use freely if you own tx_lock */
	uint64_t	tx_scr_tx_qpa;	/* use freely if you own tx_lock */
	uint32_t	tx_scr_rxhead;	/* use freely if you own tx_lock */
	uint32_t	tx_scr_rxtail;	/* use freely if you own tx_lock */
	uint64_t	tx_scr_rxsize;	/* use freely if you own tx_lock */
	uint64_t	tx_scr_rx_qpa;	/* use freely if you own tx_lock */
#ifdef CONFIG_SPLIT_SRAM
	uint64_t	tx_scr_rx_qdpa;	/* use freely if you own tx_lock */
#endif
	uint64_t	tx_scr_target;	/* use freely if you own tx_lock */

	uint32_t	rx_scr_txhead;	/* use freely if you own rx_lock */
	uint32_t	rx_scr_txtail;	/* use freely if you own rx_lock */
	uint64_t	rx_scr_txsize;	/* use freely if you own rx_lock */
	uint64_t	rx_scr_tx_qpa;	/* use freely if you own rx_lock */
#ifdef CONFIG_SPLIT_SRAM
	uint64_t	rx_scr_tx_qdpa;	/* use freely if you own rx_lock */
#endif
	uint32_t	rx_scr_rxhead;	/* use freely if you own rx_lock */
	uint32_t	rx_scr_rxtail;	/* use freely if you own rx_lock */
	uint64_t	rx_scr_rxsize;	/* use freely if you own rx_lock */
	uint64_t	rx_scr_rx_qpa;	/* use freely if you own rx_lock */
	uint64_t	rx_scr_target;	/* use freely if you own rx_lock */

	struct sram_ldc_qentry rx_scr_pkt; /* scratch buffer */
};

extern sp_ldc_endpoint_t sp_ldcs[];	/* SP endpoint table */
638 | ||
639 | #endif /* } CONFIG_FPGA */ | |
640 | ||
641 | ||
642 | #endif /* !_ASM */ | |
643 | ||
644 | #ifdef __cplusplus | |
645 | } | |
646 | #endif | |
647 | ||
648 | #endif /* _LDC_H */ |