/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>

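/*
 * TX error counters: the x-macro below expands once into the
 * MEMIF_TX_ERROR_* enum and once into the matching vlib_error_desc_t
 * table, so names, severities and description strings stay in sync.
 */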
32 : _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots") \
33 : _ (ROLLBACK, rollback, ERROR, "no enough space in tx buffers")
34 :
35 : typedef enum
36 : {
37 : #define _(f, n, s, d) MEMIF_TX_ERROR_##f,
38 : foreach_memif_tx_func_error
39 : #undef _
40 : MEMIF_TX_N_ERROR,
41 : } memif_tx_func_error_t;
42 :
43 : static vlib_error_desc_t memif_tx_func_error_counters[] = {
44 : #define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
45 : foreach_memif_tx_func_error
46 : #undef _
47 : };
48 :
#ifndef CLIB_MARCH_VARIANT
u8 *
format_memif_device_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
  memif_socket_file_t *msf;

  msf = pool_elt_at_index (mm->socket_files, mif->socket_file_index);
  s = format (s, "memif%lu/%lu", msf->socket_id, mif->id);
  return s;
}
#endif

static u8 *
format_memif_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "MEMIF interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static u8 *
format_memif_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

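/*
 * Record a single deferred copy (from a vlib buffer segment into the shared
 * memory region) to be executed later in a batched, prefetch-friendly pass.
 */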
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
                   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}

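/*
 * Copy-mode TX. Descriptors are claimed first and the copies are queued in
 * ptd->copy_ops; the copies are then executed in a second pass, four at a
 * time with prefetching. On the S2M ring this side produces at ring->head,
 * on the M2S ring it produces at ring->tail. Chained vlib buffers may span
 * several descriptors linked with MEMIF_DESC_FLAG_NEXT; if the ring runs
 * out of slots mid-packet, the partially built packet is rolled back.
 */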
static_always_inline uword
memif_interface_tx_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                           u32 *buffers, memif_if_t *mif,
                           memif_ring_type_t type, memif_queue_t *mq,
                           memif_per_thread_data_t *ptd, u32 n_left)
{
  memif_ring_t *ring;
  u32 n_copy_op;
  u16 ring_size, mask, slot, free_slots;
  int n_retries = 5;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  memif_copy_op_t *co;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  u16 head, tail;

  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

retry:

  if (type == MEMIF_RING_S2M)
    {
      slot = head = ring->head;
      tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
      mq->last_tail += tail - mq->last_tail;
      free_slots = ring_size - head + mq->last_tail;
    }
  else
    {
      slot = tail = ring->tail;
      head = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
      mq->last_tail += tail - mq->last_tail;
      free_slots = head - tail;
    }

  while (n_left && free_slots)
    {
      memif_desc_t *d0;
      void *mb0;
      i32 src_off;
      u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
      u32 saved_ptd_copy_ops_len = _vec_len (ptd->copy_ops);
      u32 saved_ptd_buffers_len = _vec_len (ptd->buffers);
      u16 saved_slot = slot;

      clib_prefetch_load (&ring->desc[(slot + 8) & mask]);

      d0 = &ring->desc[slot & mask];
      if (PREDICT_FALSE (last_region != d0->region))
        {
          last_region_shm = mif->regions[d0->region].shm;
          last_region = d0->region;
        }
      mb0 = last_region_shm + d0->offset;

      dst_off = 0;

      /* on the S2M ring this side is the producer, so it may use the full
         buffer size; on the M2S ring it must respect the length already set
         in the descriptor by the peer */
      dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;

      if (PREDICT_TRUE (n_left >= 4))
        vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
      bi0 = buffers[0];

    next_in_chain:

      b0 = vlib_get_buffer (vm, bi0);
      src_off = b0->current_data;
      src_left = b0->current_length;

      while (src_left)
        {
          if (PREDICT_FALSE (dst_left == 0))
            {
              if (free_slots)
                {
                  slot++;
                  free_slots--;
                  d0->length = dst_off;
                  d0->flags = MEMIF_DESC_FLAG_NEXT;
                  d0 = &ring->desc[slot & mask];
                  dst_off = 0;
                  dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size :
                                                        d0->length;

                  if (PREDICT_FALSE (last_region != d0->region))
                    {
                      last_region_shm = mif->regions[d0->region].shm;
                      last_region = d0->region;
                    }
                  mb0 = last_region_shm + d0->offset;
                }
              else
                {
                  /* we need to roll back the vectors before bailing out */
                  vec_set_len (ptd->buffers, saved_ptd_buffers_len);
                  vec_set_len (ptd->copy_ops, saved_ptd_copy_ops_len);
                  vlib_error_count (vm, node->node_index,
                                    MEMIF_TX_ERROR_ROLLBACK, 1);
                  slot = saved_slot;
                  goto no_free_slots;
                }
            }
          bytes_to_copy = clib_min (src_left, dst_left);
          memif_add_copy_op (ptd, mb0 + dst_off, bytes_to_copy, src_off,
                             vec_len (ptd->buffers));
          vec_add1_aligned (ptd->buffers, bi0, CLIB_CACHE_LINE_BYTES);
          src_off += bytes_to_copy;
          dst_off += bytes_to_copy;
          src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
        }

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          bi0 = b0->next_buffer;
          goto next_in_chain;
        }

      d0->length = dst_off;
      d0->flags = 0;

      free_slots -= 1;
      slot += 1;

      buffers++;
      n_left--;
    }
no_free_slots:

  /* copy data */
  n_copy_op = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_copy_op >= 8)
    {
      clib_prefetch_load (co[4].data);
      clib_prefetch_load (co[5].data);
      clib_prefetch_load (co[6].data);
      clib_prefetch_load (co[7].data);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
                        co[0].data_len);
      clib_memcpy_fast (co[1].data, b1->data + co[1].buffer_offset,
                        co[1].data_len);
      clib_memcpy_fast (co[2].data, b2->data + co[2].buffer_offset,
                        co[2].data_len);
      clib_memcpy_fast (co[3].data, b3->data + co[3].buffer_offset,
                        co[3].data_len);

      co += 4;
      n_copy_op -= 4;
    }
  while (n_copy_op)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
                        co[0].data_len);
      co += 1;
      n_copy_op -= 1;
    }

  vec_reset_length (ptd->copy_ops);
  vec_reset_length (ptd->buffers);

  if (type == MEMIF_RING_S2M)
    __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);
  else
    __atomic_store_n (&ring->tail, slot, __ATOMIC_RELEASE);

  if (n_left && n_retries--)
    goto retry;

  return n_left;
}

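/*
 * Zero-copy TX. No data is copied: each descriptor is pointed straight at
 * the vlib buffer data, using region index buffer_pool_index + 1 (region 0
 * carries the rings themselves) and an offset relative to that region's
 * mapping. Transmitted buffer indices are parked in mq->buffers and are
 * freed only after the peer advances ring->tail past them.
 */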
static_always_inline uword
memif_interface_tx_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                              u32 *buffers, memif_if_t *mif, memif_queue_t *mq,
                              memif_per_thread_data_t *ptd, u32 n_left)
{
  memif_ring_t *ring = mq->ring;
  u16 slot, free_slots, n_free;
  u16 ring_size = 1 << mq->log2_ring_size;
  u16 mask = ring_size - 1;
  int n_retries = 5;
  vlib_buffer_t *b0;
  u16 head, tail;

retry:
  tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  slot = head = ring->head;

  n_free = tail - mq->last_tail;
  if (n_free >= 16)
    {
      vlib_buffer_free_from_ring_no_next (vm, mq->buffers,
                                          mq->last_tail & mask,
                                          ring_size, n_free);
      mq->last_tail += n_free;
    }

  free_slots = ring_size - head + mq->last_tail;

  while (n_left && free_slots)
    {
      u16 s0;
      u16 slots_in_packet = 1;
      memif_desc_t *d0;
      u32 bi0;

      clib_prefetch_store (&ring->desc[(slot + 8) & mask]);

      if (PREDICT_TRUE (n_left >= 4))
        vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);

      bi0 = buffers[0];

    next_in_chain:
      s0 = slot & mask;
      d0 = &ring->desc[s0];
      mq->buffers[s0] = bi0;
      b0 = vlib_get_buffer (vm, bi0);

      d0->region = b0->buffer_pool_index + 1;
      d0->offset = (void *) b0->data + b0->current_data -
        mif->regions[d0->region].shm;
      d0->length = b0->current_length;

      free_slots--;
      slot++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          if (PREDICT_FALSE (free_slots == 0))
            {
              /* revert to last fully processed packet */
              free_slots += slots_in_packet;
              slot -= slots_in_packet;
              goto no_free_slots;
            }

          d0->flags = MEMIF_DESC_FLAG_NEXT;
          bi0 = b0->next_buffer;

          /* next */
          slots_in_packet++;
          goto next_in_chain;
        }

      d0->flags = 0;

      /* next from */
      buffers++;
      n_left--;
    }
no_free_slots:

  __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);

  if (n_left && n_retries--)
    goto retry;

  return n_left;
}

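/*
 * DMA completion callback. Runs when the batch built for the oldest
 * in-flight dma_info entry has finished: the source buffers are freed, the
 * ring->tail value recorded at submit time is published, and the dma_info
 * ring head is advanced.
 */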
CLIB_MARCH_FN (memif_tx_dma_completion_cb, void, vlib_main_t *vm,
               vlib_dma_batch_t *b)
{
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = vec_elt_at_index (mm->interfaces, b->cookie >> 16);
  memif_queue_t *mq = vec_elt_at_index (mif->tx_queues, b->cookie & 0xffff);
  memif_dma_info_t *dma_info = mq->dma_info + mq->dma_info_head;
  memif_per_thread_data_t *ptd = &dma_info->data;

  vlib_buffer_free (vm, ptd->buffers, vec_len (ptd->buffers));

  dma_info->finished = 1;
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

  __atomic_store_n (&mq->ring->tail, dma_info->dma_tail, __ATOMIC_RELEASE);

  mq->dma_info_head++;
  if (mq->dma_info_head == mq->dma_info_size)
    mq->dma_info_head = 0;
  mq->dma_info_full = 0;
}

#ifndef CLIB_MARCH_VARIANT
void
memif_tx_dma_completion_cb (vlib_main_t *vm, vlib_dma_batch_t *b)
{
  return CLIB_MARCH_FN_SELECT (memif_tx_dma_completion_cb) (vm, b);
}
#endif

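/*
 * DMA-assisted TX (used on the M2S ring only). Descriptors are claimed the
 * same way as in the copy path, but the copies are handed to the DMA engine
 * as a vlib_dma_batch_t. ring->tail is not updated here; the new tail is
 * stashed in dma_info->dma_tail and published from the completion callback.
 * When the dma_info ring is full, the function falls back to CPU memcpy.
 */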
static_always_inline uword
memif_interface_tx_dma_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                               u32 *buffers, memif_if_t *mif,
                               memif_ring_type_t type, memif_queue_t *mq,
                               u32 n_left)
{
  memif_ring_t *ring;
  u32 n_copy_op;
  u16 ring_size, mask, slot, free_slots;
  int n_retries = 5, fallback = 0;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  memif_copy_op_t *co;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  u16 head, tail;
  memif_dma_info_t *dma_info;
  memif_per_thread_data_t *ptd;
  memif_main_t *mm = &memif_main;
  u16 mif_id = mif - mm->interfaces;

  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  dma_info = mq->dma_info + mq->dma_info_tail;
  ptd = &dma_info->data;

  /* do software fallback if dma info ring is full */
  u16 dma_mask = mq->dma_info_size - 1;
  if ((((mq->dma_info_tail + 1) & dma_mask) == mq->dma_info_head) ||
      ((mq->dma_info_head == dma_mask) && (mq->dma_info_tail == 0)))
    {
      if (!mq->dma_info_full)
        mq->dma_info_full = 1;
      else
        fallback = 1;
    }

  vlib_dma_batch_t *b = NULL;
  if (PREDICT_TRUE (!fallback))
    b = vlib_dma_batch_new (vm, mif->dma_tx_config);
  if (!b)
    return n_left;

retry:

  slot = tail = mq->dma_tail;
  head = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
  mq->last_tail += tail - mq->last_tail;
  free_slots = head - mq->dma_tail;

  while (n_left && free_slots)
    {
      memif_desc_t *d0;
      void *mb0;
      i32 src_off;
      u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
      u32 saved_ptd_copy_ops_len = _vec_len (ptd->copy_ops);
      u32 saved_ptd_buffers_len = _vec_len (ptd->buffers);
      u16 saved_slot = slot;

      clib_prefetch_load (&ring->desc[(slot + 8) & mask]);

      d0 = &ring->desc[slot & mask];
      if (PREDICT_FALSE (last_region != d0->region))
        {
          last_region_shm = mif->regions[d0->region].shm;
          last_region = d0->region;
        }
      mb0 = last_region_shm + d0->offset;

      dst_off = 0;

      /* only the M2S ring is handled here, so respect the buffer length
         already set in the descriptor by the peer */
      dst_left = d0->length;

      if (PREDICT_TRUE (n_left >= 4))
        vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
      bi0 = buffers[0];

    next_in_chain:

      b0 = vlib_get_buffer (vm, bi0);
      src_off = b0->current_data;
      src_left = b0->current_length;

      while (src_left)
        {
          if (PREDICT_FALSE (dst_left == 0))
            {
              if (free_slots)
                {
                  d0->length = dst_off;
                  d0->flags = MEMIF_DESC_FLAG_NEXT;
                  d0 = &ring->desc[slot & mask];
                  dst_off = 0;
                  dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size :
                                                        d0->length;

                  if (PREDICT_FALSE (last_region != d0->region))
                    {
                      last_region_shm = mif->regions[d0->region].shm;
                      last_region = d0->region;
                    }
                  mb0 = last_region_shm + d0->offset;
                }
              else
                {
                  /* we need to roll back the vectors before bailing out */
                  vec_set_len (ptd->buffers, saved_ptd_buffers_len);
                  vec_set_len (ptd->copy_ops, saved_ptd_copy_ops_len);
                  vlib_error_count (vm, node->node_index,
                                    MEMIF_TX_ERROR_ROLLBACK, 1);
                  slot = saved_slot;
                  goto no_free_slots;
                }
            }
          bytes_to_copy = clib_min (src_left, dst_left);
          memif_add_copy_op (ptd, mb0 + dst_off, bytes_to_copy, src_off,
                             vec_len (ptd->buffers));
          src_off += bytes_to_copy;
          dst_off += bytes_to_copy;
          src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
        }

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          slot++;
          free_slots--;
          bi0 = b0->next_buffer;
          goto next_in_chain;
        }

      vec_add1_aligned (ptd->buffers, buffers[0], CLIB_CACHE_LINE_BYTES);
      d0->length = dst_off;
      d0->flags = 0;

      free_slots -= 1;
      slot += 1;

      buffers++;
      n_left--;
    }
no_free_slots:

  /* copy data */
  n_copy_op = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
  while (n_copy_op >= 8)
    {
      clib_prefetch_load (co[4].data);
      clib_prefetch_load (co[5].data);
      clib_prefetch_load (co[6].data);
      clib_prefetch_load (co[7].data);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      if (PREDICT_TRUE (!fallback))
        {
          vlib_dma_batch_add (vm, b, co[0].data,
                              b0->data + co[0].buffer_offset, co[0].data_len);
          vlib_dma_batch_add (vm, b, co[1].data,
                              b1->data + co[1].buffer_offset, co[1].data_len);
          vlib_dma_batch_add (vm, b, co[2].data,
                              b2->data + co[2].buffer_offset, co[2].data_len);
          vlib_dma_batch_add (vm, b, co[3].data,
                              b3->data + co[3].buffer_offset, co[3].data_len);
        }
      else
        {
          clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
                            co[0].data_len);
          clib_memcpy_fast (co[1].data, b1->data + co[1].buffer_offset,
                            co[1].data_len);
          clib_memcpy_fast (co[2].data, b2->data + co[2].buffer_offset,
                            co[2].data_len);
          clib_memcpy_fast (co[3].data, b3->data + co[3].buffer_offset,
                            co[3].data_len);
        }

      co += 4;
      n_copy_op -= 4;
    }
  while (n_copy_op)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      if (PREDICT_TRUE (!fallback))
        vlib_dma_batch_add (vm, b, co[0].data, b0->data + co[0].buffer_offset,
                            co[0].data_len);
      else
        clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
                          co[0].data_len);
      co += 1;
      n_copy_op -= 1;
    }

  /* save dma info before retry */
  dma_info->dma_tail = slot;
  mq->dma_tail = slot;
  vec_reset_length (ptd->copy_ops);

  if (n_left && n_retries--)
    goto retry;

  if (PREDICT_TRUE (!fallback))
    {
      vlib_dma_batch_set_cookie (vm, b,
                                 ((u64) mif_id << 16) | (mq - mif->tx_queues));
      vlib_dma_batch_submit (vm, b);
      dma_info->finished = 0;

      if (b->n_enq)
        {
          mq->dma_info_tail++;
          if (mq->dma_info_tail == mq->dma_info_size)
            mq->dma_info_tail = 0;
        }
    }
  else if (fallback && dma_info->finished)
    {
      /* if dma has already completed, update the ring immediately */
      vlib_buffer_free (vm, ptd->buffers, vec_len (ptd->buffers));
      vec_reset_length (ptd->buffers);
      __atomic_store_n (&mq->ring->tail, slot, __ATOMIC_RELEASE);
    }

  return n_left;
}

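/*
 * Device-class TX function. Picks the zero-copy, DMA or plain copy path,
 * takes the queue spinlock when the TX queue is shared between threads,
 * kicks the peer through the interrupt eventfd unless interrupts are
 * masked, and frees whatever buffers the selected path did not consume or
 * hand off.
 */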
VNET_DEVICE_CLASS_TX_FN (memif_device_class) (vlib_main_t * vm,
                                              vlib_node_runtime_t * node,
                                              vlib_frame_t * frame)
{
  memif_main_t *nm = &memif_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  memif_if_t *mif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  memif_queue_t *mq;
  u32 qid = tf->queue_id;
  u32 *from, thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (memif_main.per_thread_data,
                                                   thread_index);
  uword n_left;

  ASSERT (vec_len (mif->tx_queues) > qid);
  mq = vec_elt_at_index (mif->tx_queues, qid);

  if (tf->shared_queue)
    clib_spinlock_lock (&mq->lockp);

  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;
  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
    n_left =
      memif_interface_tx_zc_inline (vm, node, from, mif, mq, ptd, n_left);
  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
    n_left = memif_interface_tx_inline (vm, node, from, mif, MEMIF_RING_S2M,
                                        mq, ptd, n_left);
  else
    {
      if ((mif->flags & MEMIF_IF_FLAG_USE_DMA) && (mif->dma_tx_config >= 0))
        n_left = memif_interface_tx_dma_inline (vm, node, from, mif,
                                                MEMIF_RING_M2S, mq, n_left);
      else
        n_left = memif_interface_tx_inline (vm, node, from, mif,
                                            MEMIF_RING_M2S, mq, ptd, n_left);
    }

  if (tf->shared_queue)
    clib_spinlock_unlock (&mq->lockp);

  if (n_left)
    vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
                      n_left);

  if ((mq->ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
    {
      u64 b = 1;
      int __clib_unused r = write (mq->int_fd, &b, sizeof (b));
      mq->int_count++;
    }

  if ((mif->flags & MEMIF_IF_FLAG_USE_DMA) && (mif->dma_tx_config >= 0))
    {
      if (n_left)
        vlib_buffer_free (vm, from + frame->n_vectors - n_left, n_left);
    }
  else if ((mif->flags & MEMIF_IF_FLAG_ZERO_COPY) == 0)
    vlib_buffer_free (vm, from, frame->n_vectors);
  else if (n_left)
    vlib_buffer_free (vm, from + frame->n_vectors - n_left, n_left);

  return frame->n_vectors - n_left;
}

static void
memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                               u32 node_index)
{
  memif_main_t *apm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      mif->per_interface_next_index = node_index;
      return;
    }

  mif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
}

static void
memif_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

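/*
 * Polling mode sets MEMIF_RING_FLAG_MASK_INT so the peer stops signalling
 * the interrupt eventfd for this queue; interrupt and adaptive modes clear
 * the flag again.
 */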
static clib_error_t *
memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                vnet_hw_if_rx_mode mode)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);

  if (mode == VNET_HW_IF_RX_MODE_POLLING)
    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
  else
    mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;

  return 0;
}

static clib_error_t *
memif_subif_add_del_function (vnet_main_t * vnm,
                              u32 hw_if_index,
                              struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (memif_device_class) = {
  .name = "memif",
  .format_device_name = format_memif_device_name,
  .format_device = format_memif_device,
  .format_tx_trace = format_memif_tx_trace,
  .tx_function_n_errors = MEMIF_TX_N_ERROR,
  .tx_function_error_counters = memif_tx_func_error_counters,
  .rx_redirect_to_node = memif_set_interface_next_node,
  .clear_counters = memif_clear_hw_interface_counters,
  .admin_up_down_function = memif_interface_admin_up_down,
  .subif_add_del_function = memif_subif_add_del_function,
  .rx_mode_change_function = memif_interface_rx_mode_change,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */