micscif/micscif_rb.c
/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */

#include "mic/micscif.h"
#include "mic/micscif_rb.h"

#include <linux/circ_buf.h>
#include <linux/module.h>
#define count_in_ring(head, tail, size)		CIRC_CNT(head, tail, size)
#define space_in_ring(head, tail, size)		CIRC_SPACE(head, tail, size)
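
/*
 * Illustrative note (not in the original source): for a power-of-two ring,
 * CIRC_CNT()/CIRC_SPACE() reduce to masked pointer arithmetic. For example,
 * with size = 16, a write offset (head) of 2 and a read offset (tail) of 14:
 *
 *   count_in_ring(2, 14, 16) == (2 - 14) & 15 == 4    bytes readable
 *   space_in_ring(2, 14, 16) == (14 - 3) & 15 == 11   bytes writable
 *
 * CIRC_SPACE() always keeps one byte unused so that a completely full ring
 * can be distinguished from an empty one.
 */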

MODULE_LICENSE("GPL");

static void *micscif_rb_get(struct micscif_rb *rb, uint32_t size);

/**
 * micscif_rb_init - To Initialize the RingBuffer
 * @rb: The RingBuffer context
 * @read_ptr: A pointer to the memory location containing
 *            the updated read pointer
 * @write_ptr: A pointer to the memory location containing
 *             the updated write pointer
 * @rb_base: The pointer to the ring buffer
 * @size: The size of the ring buffer
 */
void micscif_rb_init(struct micscif_rb *rb,
		     volatile uint32_t *read_ptr,
		     volatile uint32_t *write_ptr,
		     volatile void *rb_base,
		     const uint32_t size)
{
	/* Size must be a power of two -- all logic associated with
	 * incrementing the read and write pointers relies on the size
	 * being a power of 2
	 */
	BUG_ON((size & (size - 1)) != 0);
	rb->rb_base = rb_base;
	rb->size = size;
	rb->read_ptr = read_ptr;
	rb->write_ptr = write_ptr;
	rb->current_read_offset = *read_ptr;
	rb->current_write_offset = *write_ptr;
}
EXPORT_SYMBOL(micscif_rb_init);
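
/*
 * Usage sketch (illustrative only, not part of this module): a caller
 * typically places the read/write offsets and the data area in memory that
 * both sides of the link can see, then hands them to micscif_rb_init().
 * The layout and names below are hypothetical.
 */
#if 0
struct example_qp_layout {
	uint32_t read_offset;	/* updated by the consumer */
	uint32_t write_offset;	/* updated by the producer */
	char	 data[4096];	/* power-of-two payload area */
};

static void example_rb_setup(struct micscif_rb *rb,
			     struct example_qp_layout *qp)
{
	/* both sides normally start from zeroed offsets */
	qp->read_offset = 0;
	qp->write_offset = 0;
	/* size must be a power of two; 4096 satisfies the BUG_ON above */
	micscif_rb_init(rb, &qp->read_offset, &qp->write_offset,
			qp->data, sizeof(qp->data));
}
#endif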

/**
 * micscif_rb_reset - To reset the RingBuffer
 * @rb - The RingBuffer context
 */
void micscif_rb_reset(struct micscif_rb *rb)
{
	/*
	 * XPU_RACE_CONDITION: write followed by read
	 * MFENCE after write
	 * Read should take care of SBOX sync
	 * Pointers are volatile (see RingBuffer declaration)
	 */
	*rb->read_ptr = 0x0;
	*rb->write_ptr = 0x0;
	smp_mb();
	rb->current_write_offset = *rb->write_ptr;
	rb->current_read_offset = *rb->read_ptr;
}
EXPORT_SYMBOL(micscif_rb_reset);

/* Copies a message to the ring buffer -- handles the wrap around case */
static int memcpy_torb(struct micscif_rb *rb, void *header,
		       void *msg, uint32_t size)
{
	/* Need to call two copies if it wraps around */
	uint32_t size1, size2;
	if ((char *)header + size >= (char *)rb->rb_base + rb->size) {
		size1 = (uint32_t)(((char *)rb->rb_base + rb->size) - (char *)header);
		size2 = size - size1;
		memcpy_toio(header, msg, size1);
		memcpy_toio(rb->rb_base, (char *)msg + size1, size2);
	} else {
		memcpy_toio(header, msg, size);
	}
	return 0;
}

/* Copies a message from the ring buffer -- handles the wrap around case */
static int memcpy_fromrb(struct micscif_rb *rb, void *header,
			 void *msg, uint32_t size)
{
	/* Need to call two copies if it wraps around */
	uint32_t size1, size2;
	if ((char *)header + size >= (char *)rb->rb_base + rb->size) {
		size1 = (uint32_t)(((char *)rb->rb_base + rb->size) - (char *)header);
		size2 = size - size1;
		memcpy_fromio(msg, header, size1);
		memcpy_fromio((char *)msg + size1, rb->rb_base, size2);
	} else {
		memcpy_fromio(msg, header, size);
	}
	return 0;
}
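
/*
 * Illustrative note (not in the original source): the wrap-around split
 * above is plain offset arithmetic. With a 4096-byte ring whose next packet
 * starts at offset 4090, an 8-byte message is copied as
 *
 *   size1 = 4096 - 4090 = 6   bytes at the end of the ring
 *   size2 = 8 - 6       = 2   bytes at the start of the ring
 *
 * so a single logical packet may occupy two physically disjoint ranges.
 */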

/**
 * micscif_rb_space -
 * Query space available for writing to the given RB.
 *
 * @rb - The RingBuffer context
 *
 * Returns: size available for writing to RB in bytes.
 */
int micscif_rb_space(struct micscif_rb *rb)
{
	rb->old_current_read_offset = rb->current_read_offset;

	rb->current_read_offset = *rb->read_ptr;
	return space_in_ring(rb->current_write_offset,
			     rb->current_read_offset, rb->size);
}
EXPORT_SYMBOL(micscif_rb_space);

/**
 * micscif_rb_write - Write one packet to the given ring buffer
 * @rb - The RingBuffer context
 * @msg - The packet to be put in the ring buffer
 * @size - the size (in bytes) to be copied
 *
 * This API does not block; it returns -ENOMEM if the RB does not have
 * enough space for the packet.
 */
int micscif_rb_write(struct micscif_rb *rb,
		     void *msg,
		     uint32_t size)
{
	void *header;
	int ret = 0;
	if ((uint32_t)micscif_rb_space(rb) < size)
		return -ENOMEM;
	header = (char *)rb->rb_base + rb->current_write_offset;
	ret = memcpy_torb(rb, header, msg, size);
	if (!ret) {
		/*
		 * XPU_RACE_CONDITION: Don't do anything here!
		 * Wait until micscif_rb_commit()
		 * Update the local ring buffer data, not the shared data until commit.
		 */
		rb->old_current_write_offset = rb->current_write_offset;
		rb->current_write_offset = (rb->current_write_offset + size) & (rb->size - 1);
	}
	return ret;
}
EXPORT_SYMBOL(micscif_rb_write);

/*
 * micscif_rb_get_next
 * Read one packet from the ring buffer.
 * @rb - The RingBuffer context
 * @msg - buffer to hold the message. Must be at least size bytes long
 * @size - the number of bytes to be read
 *
 * RETURN:
 * size if a full packet was read, 0 if size bytes are not yet available,
 * or -EFAULT if the copy out of the ring buffer failed.
 */
int micscif_rb_get_next(struct micscif_rb *rb, void *msg, uint32_t size)
{
	void *header = NULL;
	int read_size = 0;
	/*
	 * warning: micscif_rb_get() looks at the shared write pointer
	 */
	header = micscif_rb_get(rb, size);
	if (header) {
		uint32_t next_cmd_offset =
			(rb->current_read_offset + size) & (rb->size - 1);
		read_size = size;
		rb->old_current_read_offset = rb->current_read_offset;
		rb->current_read_offset = next_cmd_offset;
		/* bail out if the copy out of the ring buffer fails */
		if (memcpy_fromrb(rb, header, msg, size))
			return -EFAULT;
	}
	return read_size;
}
EXPORT_SYMBOL(micscif_rb_get_next);

/**
 * micscif_rb_update_read_ptr
 * @rb - The RingBuffer context
 */
void micscif_rb_update_read_ptr(struct micscif_rb *rb)
{
	uint32_t old_offset;
	uint32_t new_offset;
	smp_mb();
	old_offset = rb->old_current_read_offset;
	new_offset = rb->current_read_offset;

	/*
	 * XPU_RACE_CONDITION:
	 * The shared read pointer is ready to move.
	 * Moving the read pointer transfers ownership to the MIC.
	 * What if the MIC CPU starts writing to the buffer before all
	 * of our writes were flushed?
	 * Need to flush out all pending writes before the pointer update.
	 */
	smp_mb();

#ifdef CONFIG_ML1OM
	serializing_request((volatile uint8_t *)rb->rb_base + old_offset);
#endif

	*rb->read_ptr = new_offset;
#ifdef CONFIG_ML1OM
	/*
	 * Readback since KNF doesn't guarantee that PCI ordering is maintained.
	 * Need a memory barrier on the host before the readback so the readback
	 * doesn't load from the write combining buffer but will go across to the
	 * PCI bus that will then flush the posted write to the device.
	 */
	smp_mb();
	serializing_request(rb->read_ptr);
#endif
#if defined(CONFIG_MK1OM) && defined(_MIC_SCIF_)
	/*
	 * KNC Si HSD 3853952: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first posted write associated with the first EXT_WR.
	 */
	*rb->read_ptr = new_offset;
#endif
	smp_mb();
}
EXPORT_SYMBOL(micscif_rb_update_read_ptr);
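
/*
 * Receive-side usage sketch (illustrative only, not part of this module):
 * the consumer pulls a fixed-size packet with micscif_rb_get_next() and
 * only then publishes the new read offset with micscif_rb_update_read_ptr(),
 * which hands the consumed space back to the producer. The helper name and
 * packet size below are hypothetical.
 */
#if 0
static int example_rb_receive(struct micscif_rb *rb, void *pkt,
			      uint32_t pkt_size)
{
	int ret = micscif_rb_get_next(rb, pkt, pkt_size);

	if (ret == (int)pkt_size) {
		/* packet copied out; make the space visible to the writer */
		micscif_rb_update_read_ptr(rb);
		return 0;
	}
	/* 0 means not enough data yet, -EFAULT means the copy failed */
	return ret ? ret : -EAGAIN;
}
#endif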

/**
 * micscif_rb_count
 * @rb - The RingBuffer context
 * @size - the number of bytes the caller would like to read
 * RETURN: the number of bytes available to be read from the RB
 */
uint32_t micscif_rb_count(struct micscif_rb *rb, uint32_t size)
{
	if (count_in_ring(rb->current_write_offset,
			  rb->current_read_offset,
			  rb->size) < size) {
		/*
		 * Refresh the cached write offset from the shared write
		 * pointer if not enough data appears to be available
		 */
		rb->old_current_write_offset = rb->current_write_offset;
		rb->current_write_offset = *rb->write_ptr;
	}
	return count_in_ring(rb->current_write_offset,
			     rb->current_read_offset,
			     rb->size);
}
EXPORT_SYMBOL(micscif_rb_count);

/**
 * micscif_rb_commit
 * Publish the pending writes so that the uOS can fetch them
 * @rb - The RingBuffer context
 */
void micscif_rb_commit(struct micscif_rb *rb)
{
	/*
	 * XPU_RACE_CONDITION:
	 * Writes to the ring buffer memory issued before the pointer update
	 * can be out-of-order and write combined.
	 * This is the point where we start to care about
	 * consistency of the data.
	 * There are two race conditions below:
	 * (1) The ring buffer pointer moves before all data is flushed:
	 * if the uOS is late taking the interrupt for the previous transaction,
	 * it may take the new write pointer immediately
	 * and start accessing data in the ring buffer.
	 * Ring buffer data must be consistent before we update the write
	 * pointer. We read back the address at old_current_write_offset
	 * -- this is the location in memory written during the last
	 * ring buffer operation; keep in mind that ring buffers and ring buffer
	 * pointers can be in different kinds of memory (host vs MIC,
	 * depending on the currently active workaround flags).
	 * (2) If the uOS takes the interrupt while the write pointer value is
	 * still in flight, it may read the old value; the message is then lost
	 * and a deadlock follows. Must put another memory barrier after the
	 * readback -- it prevents a later read from passing the readback.
	 */
	smp_mb();
#ifdef CONFIG_ML1OM
	/*
	 * Also makes sure the following read is not reordered
	 */
	serializing_request((char *)rb->rb_base + rb->current_write_offset);
#endif
	*rb->write_ptr = rb->current_write_offset;
#ifdef CONFIG_ML1OM
	/*
	 * Readback since KNF doesn't guarantee that PCI ordering is maintained.
	 * Need a memory barrier on the host before the readback so the readback
	 * doesn't load from the write combining buffer but will go across to the
	 * PCI bus that will then flush the posted write to the device.
	 */
	smp_mb();
	serializing_request(rb->write_ptr);
#endif
#if defined(CONFIG_MK1OM) && defined(_MIC_SCIF_)
	/*
	 * KNC Si HSD 3853952: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first posted write associated with the first EXT_WR.
	 */
	*rb->write_ptr = rb->current_write_offset;
#endif
	smp_mb();
}
EXPORT_SYMBOL(micscif_rb_commit);
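
/*
 * Send-side usage sketch (illustrative only, not part of this module):
 * the producer copies one packet with micscif_rb_write() and then publishes
 * it with micscif_rb_commit(). Splitting write and commit lets several
 * packets be batched under a single pointer update. The helper name below
 * is hypothetical.
 */
#if 0
static int example_rb_send(struct micscif_rb *rb, void *pkt,
			   uint32_t pkt_size)
{
	int err = micscif_rb_write(rb, pkt, pkt_size);

	if (err)	/* -ENOMEM: not enough space in the ring */
		return err;
	/* make the packet visible to the peer */
	micscif_rb_commit(rb);
	return 0;
}
#endif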

/**
 * micscif_rb_get
 * To get the next packet from the ring buffer
 * @rb - The RingBuffer context
 * @size - the size of the packet expected
 * RETURN:
 * NULL if there is no full packet in the ring buffer,
 * otherwise a pointer to the start of the next packet
 */
static void *micscif_rb_get(struct micscif_rb *rb, uint32_t size)
{
	void *header = NULL;

	if (micscif_rb_count(rb, size) >= size)
		header = (char *)rb->rb_base + rb->current_read_offset;
	return header;
}

/**
 * micscif_rb_get_version
 * Return the ring buffer module version
 */
uint16_t micscif_rb_get_version(void)
{
	return RING_BUFFER_VERSION;
}
EXPORT_SYMBOL(micscif_rb_get_version);