/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vmxnet3/vmxnet3.h>

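/* RX error counters for this node; the macro below expands into both the
 * error enum and the corresponding description strings. */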
#define foreach_vmxnet3_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
  _(RX_PACKET, "Rx packet error") \
  _(RX_PACKET_EOP, "Rx packet error found on EOP") \
  _(NO_BUFFER, "Rx no buffer error")

typedef enum
{
#define _(f,s) VMXNET3_INPUT_ERROR_##f,
  foreach_vmxnet3_input_error
#undef _
    VMXNET3_INPUT_N_ERROR,
} vmxnet3_input_error_t;

static __clib_unused char *vmxnet3_input_error_strings[] = {
#define _(n,s) s,
  foreach_vmxnet3_input_error
#undef _
};

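/*
 * Map the ring id carried in the completion descriptor to a local ring
 * number: returns 1 if the descriptor belongs to the queue's second RX
 * ring, 0 otherwise.
 */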
static_always_inline u16
vmxnet3_find_rid (vmxnet3_device_t * vd, vmxnet3_rx_comp * rx_comp)
{
  u32 rid;

  // rid is bits 16-25 (a 10-bit number)
  rid = rx_comp->index & (0xffffffff >> 6);
  rid >>= 16;
  if ((rid >= vd->num_rx_queues) && (rid < (vd->num_rx_queues << 1)))
    return 1;
  else
    return 0;
}

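/*
 * Advance the completion ring cursor to the next entry, wrapping around and
 * flipping the expected generation bit at the end of the ring.
 */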
static_always_inline void
vmxnet3_rx_comp_ring_advance_next (vmxnet3_rxq_t * rxq)
{
  vmxnet3_rx_comp_ring *comp_ring = &rxq->rx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == rxq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_RXCF_GEN;
    }
}

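/*
 * Fill in offload metadata on the head buffer from the completion
 * descriptor: L2/L3/L4 header offsets, IP/TCP/UDP checksum offload flags
 * (when the device did not report the checksums as correct), and GSO
 * size/L4 header length for LRO-coalesced packets.
 */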
static_always_inline void
vmxnet3_handle_offload (vmxnet3_rx_comp * rx_comp, vlib_buffer_t * hb,
                        u16 gso_size)
{
  u8 l4_hdr_sz = 0;
  vnet_buffer_oflags_t oflags = 0;

  if (rx_comp->flags & VMXNET3_RXCF_IP4)
    {
      ip4_header_t *ip4 = (ip4_header_t *) (hb->data +
                                            sizeof (ethernet_header_t));

      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        ip4_header_bytes (ip4);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_IPC))
            {
              oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
              ip4->checksum = 0;
            }
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
  else if (rx_comp->flags & VMXNET3_RXCF_IP6)
    {
      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        sizeof (ip6_header_t);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
  if (oflags)
    vnet_buffer_offload_flags_set (hb, oflags);
}

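/*
 * Per-queue RX path: walk the completion ring, reassemble multi-descriptor
 * packets into buffer chains, attach offload metadata, trace and enqueue
 * the packets, then refill both RX rings.
 */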
static_always_inline uword
vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame, vmxnet3_device_t * vd,
                             u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  vmxnet3_rx_comp *rx_comp;
  u32 desc_idx;
  vmxnet3_rxq_t *rxq;
  u32 thread_index = vm->thread_index;
  u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  vmxnet3_rx_ring *ring;
  vmxnet3_rx_comp_ring *comp_ring;
  u16 rid;
  vlib_buffer_t *prev_b0 = 0, *hb = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u8 known_next = 0, got_packet = 0;
  vmxnet3_rx_desc *rxd;
  clib_error_t *error;
  u16 gso_size = 0;

  rxq = vec_elt_at_index (vd->rxqs, qid);
  comp_ring = &rxq->rx_comp_ring;
  bi = buffer_indices;
  next = nexts;
  rx_comp = &rxq->rx_comp[comp_ring->next];

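  /*
   * Consume completion descriptors the device has written back, i.e.
   * entries whose generation bit matches the ring's current generation,
   * up to one full frame of packets.
   */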
  while (PREDICT_TRUE ((n_rx_packets < VLIB_FRAME_SIZE) &&
                       (comp_ring->gen ==
                        (rx_comp->flags & VMXNET3_RXCF_GEN))))
    {
      vlib_buffer_t *b0;
      u32 bi0;

      rid = vmxnet3_find_rid (vd, rx_comp);
      ring = &rxq->rx_ring[rid];

      if (PREDICT_TRUE (ring->fill >= 1))
        ring->fill--;
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_NO_BUFFER, 1);
          if (hb)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          break;
        }

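      /*
       * Claim the buffer posted at this descriptor index and reset its
       * metadata before it joins a packet chain.
       */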
      desc_idx = rx_comp->index & VMXNET3_RXC_INDEX;
      ring->consume = desc_idx;
      rxd = &rxq->rx_desc[rid][desc_idx];

      bi0 = ring->bufs[desc_idx];
      ring->bufs[desc_idx] = ~0;

      b0 = vlib_get_buffer (vm, bi0);
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = vd->sw_if_index;
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      vnet_buffer (b0)->feature_arc_index = 0;
      b0->current_length = rx_comp->len & VMXNET3_RXCL_LEN_MASK;
      b0->current_data = 0;
      b0->total_length_not_including_first_buffer = 0;
      b0->next_buffer = 0;
      b0->flags = 0;
      b0->error = 0;
      b0->current_config_index = 0;

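      /*
       * The device reports packet errors on the EOP descriptor; drop the
       * buffer (and any partially assembled chain) when one is flagged.
       */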
      if (PREDICT_FALSE ((rx_comp->index & VMXNET3_RXCI_EOP) &&
                         (rx_comp->len & VMXNET3_RXCL_ERROR)))
        {
          vlib_buffer_free_one (vm, bi0);
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          goto next;
        }

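      /*
       * SOP/EOP flags drive the reassembly state machine: SOP starts a new
       * head buffer, EOP completes the packet, and descriptors in between
       * extend the chain.
       */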
      if (rx_comp->index & VMXNET3_RXCI_SOP)
        {
          ASSERT (!(rxd->flags & VMXNET3_RXF_BTYPE));
          /* start segment */
          if (vd->gso_enable &&
              (rx_comp->flags & VMXNET3_RXCF_CT) == VMXNET3_RXCOMP_TYPE_LRO)
            {
              vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;

              gso_size = lro->flags & VMXNET3_RXECF_MSS_MASK;
            }

          hb = b0;
          bi[0] = bi0;
          if (!(rx_comp->index & VMXNET3_RXCI_EOP))
            {
              hb->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
              prev_b0 = b0;
            }
          else
            {
              /*
               * Both start and end of packet are set. It is a complete
               * packet.
               */
              prev_b0 = 0;
              got_packet = 1;
            }
        }
      else if (rx_comp->index & VMXNET3_RXCI_EOP)
        {
          /* end of segment */
          if (PREDICT_TRUE (prev_b0 != 0))
            {
              if (PREDICT_TRUE (b0->current_length != 0))
                {
                  prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  prev_b0->next_buffer = bi0;
                  hb->total_length_not_including_first_buffer +=
                    b0->current_length;
                }
              else
                {
                  vlib_buffer_free_one (vm, bi0);
                }
              prev_b0 = 0;
              got_packet = 1;
            }
          else
            {
              /* EOP without SOP, error */
              vlib_error_count (vm, node->node_index,
                                VMXNET3_INPUT_ERROR_RX_PACKET_NO_SOP, 1);
              vlib_buffer_free_one (vm, bi0);
              if (hb && vlib_get_buffer_index (vm, hb) != bi0)
                {
                  vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
                  hb = 0;
                }
              goto next;
            }
        }
      else if (prev_b0)        // !sop && !eop
        {
          /* mid chain */
          ASSERT (rxd->flags & VMXNET3_RXF_BTYPE);
          prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
          prev_b0->next_buffer = bi0;
          prev_b0 = b0;
          hb->total_length_not_including_first_buffer += b0->current_length;
        }
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET, 1);
          vlib_buffer_free_one (vm, bi0);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          goto next;
        }

      n_rx_bytes += b0->current_length;

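      /*
       * A complete packet has been assembled: choose the next node
       * (interface redirect, device-input feature arc, or ethernet-input)
       * and, for untagged frames headed to ethernet-input, fill in the
       * checksum/GSO offload metadata.
       */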
      if (got_packet)
        {
          if (PREDICT_FALSE (vd->per_interface_next_index != ~0))
            {
              next_index = vd->per_interface_next_index;
              known_next = 1;
            }

          if (PREDICT_FALSE
              (vnet_device_input_have_features (vd->sw_if_index)))
            {
              vnet_feature_start_device_input_x1 (vd->sw_if_index,
                                                  &next_index, hb);
              known_next = 1;
            }

          if (PREDICT_FALSE (known_next))
            next[0] = next_index;
          else
            {
              ethernet_header_t *e = (ethernet_header_t *) hb->data;

              next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
              if (!ethernet_frame_is_tagged (ntohs (e->type)))
                vmxnet3_handle_offload (rx_comp, hb, gso_size);
            }

          n_rx_packets++;
          next++;
          bi++;
          hb = 0;
          got_packet = 0;
          gso_size = 0;
        }

    next:
      vmxnet3_rx_comp_ring_advance_next (rxq);
      rx_comp = &rxq->rx_comp[comp_ring->next];
    }

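  /* Trace up to the requested number of the packets received above. */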
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;

      bi = buffer_indices;
      next = nexts;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
          if (PREDICT_TRUE
              (vlib_trace_buffer
               (vm, node, next[0], b, /* follow_chain */ 0)))
            {
              vmxnet3_input_trace_t *tr =
                vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = next[0];
              tr->hw_if_index = vd->hw_if_index;
              tr->buffer = *b;
              n_trace--;
            }
          n_left--;
          bi++;
          next++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

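  /* Hand the packets to their next nodes and update the RX interface
     counters. */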
  if (PREDICT_TRUE (n_rx_packets))
    {
      vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts,
                                   n_rx_packets);
      vlib_increment_combined_counter
        (vnm->interface_main.combined_sw_if_counters +
         VNET_INTERFACE_COUNTER_RX, thread_index,
         vd->sw_if_index, n_rx_packets, n_rx_bytes);
    }

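  /* Replenish the buffers consumed above on both RX rings. */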
  error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }
  error = vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }

  return n_rx_packets;
}

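/*
 * Node function: poll every RX queue assigned to this thread and gather
 * packets from devices that are administratively up.
 */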
VLIB_NODE_FN (vmxnet3_input_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  vnet_hw_if_rxq_poll_vector_t *pve;

  vec_foreach (pve, pv)
    {
      vmxnet3_device_t *vd;
      vd = vec_elt_at_index (vmxm->devices, pve->dev_instance);
      if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
        continue;
      n_rx += vmxnet3_device_input_inline (vm, node, frame, vd, pve->queue_id);
    }
  return n_rx;
}

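/*
 * Node registration: a sibling of device-input, created in the disabled
 * state and brought into service when RX queues are assigned to it.
 */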
#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
  .name = "vmxnet3-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .format_trace = format_vmxnet3_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = VMXNET3_INPUT_N_ERROR,
  .error_strings = vmxnet3_input_error_strings,
};
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */