Line data Source code
1 : /*
2 : *------------------------------------------------------------------
3 : * Copyright (c) 2020 Intel and/or its affiliates.
4 : * Licensed under the Apache License, Version 2.0 (the "License");
5 : * you may not use this file except in compliance with the License.
6 : * You may obtain a copy of the License at:
7 : *
8 : * http://www.apache.org/licenses/LICENSE-2.0
9 : *
10 : * Unless required by applicable law or agreed to in writing, software
11 : * distributed under the License is distributed on an "AS IS" BASIS,
12 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 : * See the License for the specific language governing permissions and
14 : * limitations under the License.
15 : *------------------------------------------------------------------
16 : */
17 :
18 : #include <vppinfra/mem.h>
19 : #include "avf_advanced_flow.h"
20 :
21 : #define AVF_FDIR_IPV6_TC_OFFSET 20
22 : #define AVF_IPV6_TC_MASK (0xFF << AVF_FDIR_IPV6_TC_OFFSET)
23 : #define AVF_FDIR_MAX_QREGION_SIZE 128
24 :
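   : /*
   :  * Typical call sequence (a sketch inferred from the functions below,
   :  * not a mandated API contract): avf_fdir_rcfg_create (), then
   :  * avf_fdir_parse_pattern () or avf_fdir_parse_generic_pattern () plus
   :  * avf_fdir_parse_action (), then avf_fdir_rcfg_validate () and
   :  * avf_fdir_rule_create (), and finally avf_fdir_rule_destroy () and
   :  * avf_fdir_rcfg_destroy ().
   :  */
   :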
25 : /*
26 : * Return the last (most-significant) bit set (1-based), or 0 if x is 0.
27 : */
28 : static inline int
29 0 : fls_u32 (u32 x)
30 : {
31 0 : return (x == 0) ? 0 : 64 - count_leading_zeros (x);
32 : }
33 :
34 : static inline int
35 0 : ether_addr_is_zero (const struct avf_ether_addr *ea)
36 : {
37 0 : const u16 *w = (const u16 *) ea;
38 :
39 0 : return (w[0] | w[1] | w[2]) == 0;
40 : }
41 :
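   : /* Allocate and zero-initialize a flow director rule configuration for
   :  * the given tunnel level, VSI id and RX queue count.  Returns 0 on
   :  * success, -1 if the allocation fails. */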
42 : int
43 0 : avf_fdir_rcfg_create (struct avf_fdir_conf **rcfg, int tunnel_level, u16 vsi,
44 : u16 nrxq)
45 : {
46 0 : (*rcfg) = clib_mem_alloc (sizeof (**rcfg));
47 0 : if ((*rcfg) == NULL)
48 : {
49 0 : return -1;
50 : }
51 :
52 0 : clib_memset (*rcfg, 0, sizeof (**rcfg));
53 :
54 0 : (*rcfg)->add_fltr.rule_cfg.proto_hdrs.tunnel_level = tunnel_level;
55 0 : (*rcfg)->vsi = vsi;
56 0 : (*rcfg)->nb_rx_queues = nrxq;
57 :
58 0 : return 0;
59 : }
60 :
61 : int
62 0 : avf_fdir_rcfg_destroy (struct avf_fdir_conf *rcfg)
63 : {
64 0 : clib_mem_free (rcfg);
65 :
66 0 : return 0;
67 : }
68 :
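   : /* Set the protocol header type for one layer of the rule's protocol
   :  * header stack.  Fails if the layer index is out of range. */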
69 : int
70 0 : avf_fdir_rcfg_set_hdr (struct avf_fdir_conf *rcfg, int layer,
71 : enum virtchnl_proto_hdr_type hdr)
72 : {
73 : struct virtchnl_proto_hdrs *hdrs;
74 :
75 0 : hdrs = &rcfg->add_fltr.rule_cfg.proto_hdrs;
76 0 : if (layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS)
77 0 : return -1;
78 :
79 0 : hdrs->proto_hdr[layer].type = hdr;
80 :
81 0 : return 0;
82 : }
83 :
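   : /* Translate one flow item (spec + mask) into the corresponding
   :  * virtchnl protocol header: validate the mask, record the matched
   :  * fields in the rule's input set, set the per-field selector bits and
   :  * copy the spec into the header buffer. */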
84 : int
85 0 : avf_fdir_rcfg_set_field (struct avf_fdir_conf *rcfg, int layer,
86 : struct avf_flow_item *item,
87 : struct avf_flow_error *error)
88 : {
89 : const struct avf_ipv4_hdr *ipv4_spec, *ipv4_mask;
90 : const struct avf_ipv6_hdr *ipv6_spec, *ipv6_mask;
91 : const struct avf_udp_hdr *udp_spec, *udp_mask;
92 : const struct avf_tcp_hdr *tcp_spec, *tcp_mask;
93 : const struct avf_sctp_hdr *sctp_spec, *sctp_mask;
94 : const struct avf_gtp_hdr *gtp_spec, *gtp_mask;
95 : const struct avf_gtp_psc_hdr *gtp_psc_spec, *gtp_psc_mask;
96 : const struct avf_l2tpv3oip_hdr *l2tpv3oip_spec, *l2tpv3oip_mask;
97 : const struct avf_esp_hdr *esp_spec, *esp_mask;
98 : const struct avf_ah_hdr *ah_spec, *ah_mask;
99 : const struct avf_pfcp_hdr *pfcp_spec, *pfcp_mask;
100 : const struct avf_flow_eth_hdr *eth_spec, *eth_mask;
101 :
102 : struct virtchnl_proto_hdr *hdr;
103 : enum avf_flow_item_type type;
104 : u16 ether_type;
105 0 : int ret = 0;
106 :
107 0 : u8 ipv6_addr_mask[16] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
108 : 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
109 :
110 0 : hdr = &rcfg->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
111 0 : type = item->type;
112 :
113 0 : switch (type)
114 : {
115 0 : case AVF_FLOW_ITEM_TYPE_ETH:
116 0 : eth_spec = item->spec;
117 0 : eth_mask = item->mask;
118 :
119 0 : hdr->type = VIRTCHNL_PROTO_HDR_ETH;
120 :
121 0 : if (eth_spec && eth_mask)
122 : {
123 0 : if (!ether_addr_is_zero (&eth_mask->src) ||
124 0 : !ether_addr_is_zero (&eth_mask->dst))
125 : {
126 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
127 : AVF_FLOW_ERROR_TYPE_ITEM, item,
128 : "Invalid MAC address mask.");
129 0 : return ret;
130 : }
131 :
132 0 : if (eth_mask->type)
133 : {
134 0 : if (eth_mask->type != 0xffff)
135 : {
136 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
137 : AVF_FLOW_ERROR_TYPE_ITEM, item,
138 : "Invalid type mask.");
139 0 : return ret;
140 : }
141 : }
142 : }
143 :
144 0 : if (eth_spec && eth_mask && eth_mask->type)
145 : {
146 0 : ether_type = clib_net_to_host_u16 (eth_spec->type);
147 0 : if (ether_type == AVF_ETHER_TYPE_IPV4 ||
148 : ether_type == AVF_ETHER_TYPE_IPV6)
149 : {
150 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
151 : AVF_FLOW_ERROR_TYPE_ITEM, item,
152 : "Unsupported ether_type.");
153 0 : return ret;
154 : }
155 :
156 0 : rcfg->input_set |= AVF_INSET_ETHERTYPE;
157 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ETH, ETHERTYPE);
158 :
159 0 : clib_memcpy (hdr->buffer, eth_spec, sizeof (*eth_spec));
160 : }
161 0 : break;
162 :
163 0 : case AVF_FLOW_ITEM_TYPE_IPV4:
164 0 : ipv4_spec = item->spec;
165 0 : ipv4_mask = item->mask;
166 0 : hdr->type = VIRTCHNL_PROTO_HDR_IPV4;
167 :
168 0 : if (ipv4_spec && ipv4_mask)
169 : {
170 0 : if (ipv4_mask->version_ihl || ipv4_mask->total_length ||
171 0 : ipv4_mask->packet_id || ipv4_mask->fragment_offset ||
172 0 : ipv4_mask->hdr_checksum)
173 : {
174 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
175 : AVF_FLOW_ERROR_TYPE_ITEM, item,
176 : "Invalid IPv4 mask.");
177 0 : return ret;
178 : }
179 :
180 0 : if (ipv4_mask->type_of_service == 0xff)
181 : {
182 0 : rcfg->input_set |= AVF_INSET_IPV4_TOS;
183 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DSCP);
184 : }
185 :
186 0 : if (ipv4_mask->next_proto_id == 0xff)
187 : {
188 0 : rcfg->input_set |= AVF_INSET_IPV4_PROTO;
189 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, PROT);
190 : }
191 :
192 0 : if (ipv4_mask->time_to_live == 0xff)
193 : {
194 0 : rcfg->input_set |= AVF_INSET_IPV4_TTL;
195 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, TTL);
196 : }
197 :
198 0 : if (ipv4_mask->src_addr == 0xffffffff)
199 : {
200 0 : rcfg->input_set |= AVF_INSET_IPV4_SRC;
201 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, SRC);
202 : }
203 :
204 0 : if (ipv4_mask->dst_addr == 0xffffffff)
205 : {
206 0 : rcfg->input_set |= AVF_INSET_IPV4_DST;
207 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV4, DST);
208 : }
209 :
210 0 : clib_memcpy (hdr->buffer, ipv4_spec, sizeof (*ipv4_spec));
211 : }
212 0 : break;
213 :
214 0 : case AVF_FLOW_ITEM_TYPE_IPV6:
215 0 : ipv6_spec = item->spec;
216 0 : ipv6_mask = item->mask;
217 0 : hdr->type = VIRTCHNL_PROTO_HDR_IPV6;
218 :
219 0 : if (ipv6_spec && ipv6_mask)
220 : {
221 0 : if (ipv6_mask->payload_len)
222 : {
223 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
224 : AVF_FLOW_ERROR_TYPE_ITEM, item,
225 : "Invalid IPv6 mask");
226 0 : return ret;
227 : }
228 :
229 0 : if ((ipv6_mask->vtc_flow &
230 0 : clib_host_to_net_u32 (AVF_IPV6_TC_MASK)) ==
231 0 : (clib_host_to_net_u32 (AVF_IPV6_TC_MASK)))
232 : {
233 0 : rcfg->input_set |= AVF_INSET_IPV6_TC;
234 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, TC);
235 : }
236 :
237 0 : if (ipv6_mask->proto == 0xff)
238 : {
239 0 : rcfg->input_set |= AVF_INSET_IPV6_NEXT_HDR;
240 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, PROT);
241 : }
242 :
243 0 : if (ipv6_mask->hop_limits == 0xff)
244 : {
245 0 : rcfg->input_set |= AVF_INSET_IPV6_HOP_LIMIT;
246 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, HOP_LIMIT);
247 : }
248 :
249 0 : if (!clib_memcmp (ipv6_mask->src_addr, ipv6_addr_mask,
250 : sizeof (ipv6_mask->src_addr)))
251 : {
252 0 : rcfg->input_set |= AVF_INSET_IPV6_SRC;
253 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, SRC);
254 : }
255 0 : if (!clib_memcmp (ipv6_mask->dst_addr, ipv6_addr_mask,
256 : sizeof (ipv6_mask->dst_addr)))
257 : {
258 0 : rcfg->input_set |= AVF_INSET_IPV6_DST;
259 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, IPV6, DST);
260 : }
261 :
262 0 : clib_memcpy (hdr->buffer, ipv6_spec, sizeof (*ipv6_spec));
263 : }
264 :
265 0 : break;
266 :
267 0 : case AVF_FLOW_ITEM_TYPE_UDP:
268 0 : udp_spec = item->spec;
269 0 : udp_mask = item->mask;
270 0 : hdr->type = VIRTCHNL_PROTO_HDR_UDP;
271 :
272 0 : if (udp_spec && udp_mask)
273 : {
274 0 : if (udp_mask->dgram_len || udp_mask->dgram_cksum)
275 : {
276 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
277 : AVF_FLOW_ERROR_TYPE_ITEM, item,
278 : "Invalid UDP mask");
279 0 : return ret;
280 : }
281 :
282 0 : if (udp_mask->src_port == 0xffff)
283 : {
284 0 : rcfg->input_set |= AVF_INSET_UDP_SRC_PORT;
285 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, SRC_PORT);
286 : }
287 :
288 0 : if (udp_mask->dst_port == 0xffff)
289 : {
290 0 : rcfg->input_set |= AVF_INSET_UDP_DST_PORT;
291 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, UDP, DST_PORT);
292 : }
293 :
294 0 : clib_memcpy (hdr->buffer, udp_spec, sizeof (*udp_spec));
295 : }
296 0 : break;
297 :
298 0 : case AVF_FLOW_ITEM_TYPE_TCP:
299 0 : tcp_spec = item->spec;
300 0 : tcp_mask = item->mask;
301 0 : hdr->type = VIRTCHNL_PROTO_HDR_TCP;
302 :
303 0 : if (tcp_spec && tcp_mask)
304 : {
305 0 : if (tcp_mask->sent_seq || tcp_mask->recv_ack || tcp_mask->data_off ||
306 0 : tcp_mask->tcp_flags || tcp_mask->rx_win || tcp_mask->cksum ||
307 0 : tcp_mask->tcp_urp)
308 : {
309 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
310 : AVF_FLOW_ERROR_TYPE_ITEM, item,
311 : "Invalid TCP mask");
312 0 : return ret;
313 : }
314 :
315 0 : if (tcp_mask->src_port == 0xffff)
316 : {
317 0 : rcfg->input_set |= AVF_INSET_TCP_SRC_PORT;
318 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, SRC_PORT);
319 : }
320 :
321 0 : if (tcp_mask->dst_port == 0xffff)
322 : {
323 0 : rcfg->input_set |= AVF_INSET_TCP_DST_PORT;
324 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, TCP, DST_PORT);
325 : }
326 :
327 0 : clib_memcpy (hdr->buffer, tcp_spec, sizeof (*tcp_spec));
328 : }
329 :
330 0 : break;
331 :
332 0 : case AVF_FLOW_ITEM_TYPE_SCTP:
333 0 : sctp_spec = item->spec;
334 0 : sctp_mask = item->mask;
335 0 : hdr->type = VIRTCHNL_PROTO_HDR_SCTP;
336 :
337 0 : if (sctp_spec && sctp_mask)
338 : {
339 0 : if (sctp_mask->cksum)
340 : {
341 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
342 : AVF_FLOW_ERROR_TYPE_ITEM, item,
343 : "Invalid SCTP mask");
344 0 : return ret;
345 : }
346 :
347 0 : if (sctp_mask->src_port == 0xffff)
348 : {
349 0 : rcfg->input_set |= AVF_INSET_SCTP_SRC_PORT;
350 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, SRC_PORT);
351 : }
352 :
353 0 : if (sctp_mask->dst_port == 0xffff)
354 : {
355 0 : rcfg->input_set |= AVF_INSET_SCTP_DST_PORT;
356 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, SCTP, DST_PORT);
357 : }
358 :
359 0 : clib_memcpy (hdr->buffer, sctp_spec, sizeof (*sctp_spec));
360 : }
361 0 : break;
362 :
363 0 : case AVF_FLOW_ITEM_TYPE_GTPU:
364 0 : gtp_spec = item->spec;
365 0 : gtp_mask = item->mask;
366 0 : hdr->type = VIRTCHNL_PROTO_HDR_GTPU_IP;
367 :
368 0 : if (gtp_spec && gtp_mask)
369 : {
370 0 : if (gtp_mask->v_pt_rsv_flags || gtp_mask->msg_type ||
371 0 : gtp_mask->msg_len)
372 : {
373 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
374 : AVF_FLOW_ERROR_TYPE_ITEM, item,
375 : "Invalid GTP mask");
376 0 : return ret;
377 : }
378 :
379 0 : if (gtp_mask->teid == 0xffffffff)
380 : {
381 0 : rcfg->input_set |= AVF_INSET_GTPU_TEID;
382 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_IP, TEID);
383 : }
384 :
385 0 : clib_memcpy (hdr->buffer, gtp_spec, sizeof (*gtp_spec));
386 : }
387 :
388 0 : break;
389 :
390 0 : case AVF_FLOW_ITEM_TYPE_GTP_PSC:
391 0 : gtp_psc_spec = item->spec;
392 0 : gtp_psc_mask = item->mask;
393 0 : hdr->type = VIRTCHNL_PROTO_HDR_GTPU_EH;
394 :
395 0 : if (gtp_psc_spec && gtp_psc_mask)
396 : {
397 0 : if (gtp_psc_mask->qfi == 0xff)
398 : {
399 0 : rcfg->input_set |= AVF_INSET_GTPU_QFI;
400 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, GTPU_EH, QFI);
401 : }
402 :
403 0 : clib_memcpy (hdr->buffer, gtp_psc_spec, sizeof (*gtp_psc_spec));
404 : }
405 :
406 0 : break;
407 :
408 0 : case AVF_FLOW_ITEM_TYPE_L2TPV3OIP:
409 0 : l2tpv3oip_spec = item->spec;
410 0 : l2tpv3oip_mask = item->mask;
411 0 : hdr->type = VIRTCHNL_PROTO_HDR_L2TPV3;
412 :
413 0 : if (l2tpv3oip_spec && l2tpv3oip_mask)
414 : {
415 0 : if (l2tpv3oip_mask->session_id == 0xffffffff)
416 : {
417 0 : rcfg->input_set |= AVF_L2TPV3OIP_SESSION_ID;
418 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, L2TPV3, SESS_ID);
419 : }
420 :
421 0 : clib_memcpy (hdr->buffer, l2tpv3oip_spec, sizeof (*l2tpv3oip_spec));
422 : }
423 0 : break;
424 :
425 0 : case AVF_FLOW_ITEM_TYPE_ESP:
426 0 : esp_spec = item->spec;
427 0 : esp_mask = item->mask;
428 0 : hdr->type = VIRTCHNL_PROTO_HDR_ESP;
429 :
430 0 : if (esp_spec && esp_mask)
431 : {
432 0 : if (esp_mask->spi == 0xffffffff)
433 : {
434 0 : rcfg->input_set |= AVF_INSET_ESP_SPI;
435 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, ESP, SPI);
436 : }
437 :
438 0 : clib_memcpy (hdr->buffer, esp_spec, sizeof (*esp_spec));
439 : }
440 0 : break;
441 :
442 0 : case AVF_FLOW_ITEM_TYPE_AH:
443 0 : ah_spec = item->spec;
444 0 : ah_mask = item->mask;
445 0 : hdr->type = VIRTCHNL_PROTO_HDR_AH;
446 :
447 0 : if (ah_spec && ah_mask)
448 : {
449 0 : if (ah_mask->spi == 0xffffffff)
450 : {
451 0 : rcfg->input_set |= AVF_INSET_AH_SPI;
452 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, AH, SPI);
453 : }
454 :
455 0 : clib_memcpy (hdr->buffer, ah_spec, sizeof (*ah_spec));
456 : }
457 0 : break;
458 :
459 0 : case AVF_FLOW_ITEM_TYPE_PFCP:
460 0 : pfcp_spec = item->spec;
461 0 : pfcp_mask = item->mask;
462 0 : hdr->type = VIRTCHNL_PROTO_HDR_PFCP;
463 :
464 0 : if (pfcp_spec && pfcp_mask)
465 : {
466 0 : if (pfcp_mask->s_field == 0xff)
467 : {
468 0 : rcfg->input_set |= AVF_INSET_PFCP_S_FIELD;
469 0 : VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT (hdr, PFCP, S_FIELD);
470 : }
471 :
472 0 : clib_memcpy (hdr->buffer, pfcp_spec, sizeof (*pfcp_spec));
473 : }
474 0 : break;
475 :
476 0 : default:
477 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ITEM,
478 : item, "Invalid pattern item.");
479 0 : return ret;
480 : }
481 :
482 0 : return 0;
483 : }
484 :
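   : /* Add a queue action (or, for a power-of-two size greater than one, a
   :  * queue region action) at the given action index. */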
485 : int
486 0 : avf_fdir_rcfg_act_queue (struct avf_fdir_conf *rcfg, int queue, int size,
487 : int act_idx)
488 : {
489 0 : if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
490 0 : return -AVF_FAILURE;
491 :
492 : struct virtchnl_filter_action *filter_action;
493 :
494 0 : filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
495 0 : filter_action->type = VIRTCHNL_ACTION_QUEUE;
496 0 : filter_action->act_conf.queue.index = queue;
497 :
498 0 : if (size == 1)
499 0 : return 0;
500 0 : else if (is_pow2 (size))
501 0 : filter_action->act_conf.queue.region = fls_u32 (size) - 1;
502 :
503 0 : return 0;
504 : }
505 :
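   : /* Validate an RSS action as a queue region: the region must hold more
   :  * than one queue, the queue indices must be contiguous and within the
   :  * VSI's RX queue range, and the region size must be a power of two no
   :  * larger than AVF_FDIR_MAX_QREGION_SIZE. */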
506 : int
507 0 : avf_fdir_parse_action_qregion (struct avf_fdir_conf *rcfg,
508 : const struct avf_flow_action *act, int act_idx,
509 : struct avf_flow_error *error)
510 : {
511 0 : const struct avf_flow_action_rss *rss = act->conf;
512 : struct virtchnl_filter_action *filter_action;
513 : u32 i;
514 : int ret;
515 :
516 0 : filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
517 :
518 0 : if (rss->queue_num <= 1)
519 : {
520 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
521 : act, "Queue region size can't be 0 or 1.");
522 0 : return ret;
523 : }
524 :
525 : /* Check that the queue indices of the queue region are contiguous. */
526 0 : for (i = 0; i < rss->queue_num - 1; i++)
527 : {
528 0 : if (rss->queue[i + 1] != rss->queue[i] + 1)
529 : {
530 : ret =
531 0 : avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
532 : act, "Discontinuous queue region");
533 0 : return ret;
534 : }
535 : }
536 :
537 0 : if (rss->queue[rss->queue_num - 1] >= rcfg->nb_rx_queues)
538 : {
539 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
540 : act, "Invalid queue region indexes.");
541 0 : return ret;
542 : }
543 :
544 0 : if (!(is_pow2 (rss->queue_num) &&
545 0 : rss->queue_num <= AVF_FDIR_MAX_QREGION_SIZE))
546 : {
547 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
548 : act,
549 : "The region size should be any of the "
550 : "following values: 1, 2, 4, 8, 16, 32, "
551 : "64, 128, as long as the total number of "
552 : "queues does not exceed the VSI allocation");
553 0 : return ret;
554 : }
555 :
556 0 : filter_action->type = VIRTCHNL_ACTION_Q_REGION;
557 0 : filter_action->act_conf.queue.index = rss->queue[0];
558 0 : filter_action->act_conf.queue.region = fls_u32 (rss->queue_num) - 1;
559 :
560 0 : return 0;
561 : }
562 :
563 : int
564 0 : avf_fdir_rcfg_act_drop (struct avf_fdir_conf *rcfg, int act_idx)
565 : {
566 : struct virtchnl_filter_action *filter_action;
567 :
568 0 : if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
569 0 : return -AVF_FAILURE;
570 :
571 0 : filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
572 0 : filter_action->type = VIRTCHNL_ACTION_DROP;
573 :
574 0 : return 0;
575 : }
576 :
577 : int
578 0 : avf_fdir_rcfg_act_mark (struct avf_fdir_conf *rcfg, const u32 mark,
579 : int act_idx)
580 : {
581 : struct virtchnl_filter_action *filter_action;
582 0 : if (act_idx >= VIRTCHNL_MAX_NUM_ACTIONS)
583 0 : return -AVF_FAILURE;
584 :
585 0 : filter_action = rcfg->add_fltr.rule_cfg.action_set.actions + act_idx;
586 :
587 0 : filter_action->type = VIRTCHNL_ACTION_MARK;
588 0 : filter_action->act_conf.mark_id = mark;
589 :
590 0 : return 0;
591 : }
592 :
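   : /* Ask the PF to validate the rule (validate_only = 1) without
   :  * programming it.  Returns the negated virtchnl status on failure. */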
593 : int
594 0 : avf_fdir_rcfg_validate (struct avf_flow_vc_ctx *ctx,
595 : struct avf_fdir_conf *rcfg)
596 : {
597 : int ret;
598 0 : rcfg->add_fltr.vsi_id = rcfg->vsi;
599 0 : rcfg->add_fltr.validate_only = 1;
600 : struct virtchnl_fdir_add fdir_ret;
601 :
602 : ret =
603 0 : ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_ADD_FDIR_FILTER, &rcfg->add_fltr,
604 : sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));
605 :
606 0 : if (ret != 0)
607 : {
608 0 : return ret;
609 : }
610 :
611 0 : if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
612 : {
613 0 : ret = -fdir_ret.status;
614 : }
615 :
616 0 : return ret;
617 : }
618 :
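   : /* Program the rule on the PF and store the returned flow id in the
   :  * rule configuration. */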
619 : int
620 0 : avf_fdir_rule_create (struct avf_flow_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
621 : {
622 : int ret;
623 0 : rcfg->add_fltr.vsi_id = rcfg->vsi;
624 0 : rcfg->add_fltr.validate_only = 0;
625 : struct virtchnl_fdir_add fdir_ret;
626 :
627 : ret =
628 0 : ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_ADD_FDIR_FILTER, &rcfg->add_fltr,
629 : sizeof (rcfg->add_fltr), &fdir_ret, sizeof (fdir_ret));
630 :
631 0 : if (ret != 0)
632 : {
633 0 : return ret;
634 : }
635 :
636 0 : rcfg->flow_id = fdir_ret.flow_id;
637 :
638 0 : if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
639 : {
640 0 : ret = -fdir_ret.status;
641 : }
642 :
643 0 : return ret;
644 : }
645 :
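   : /* Delete a previously created rule, identified by the stored flow id. */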
646 : int
647 0 : avf_fdir_rule_destroy (struct avf_flow_vc_ctx *ctx, struct avf_fdir_conf *rcfg)
648 : {
649 : int ret;
650 : struct virtchnl_fdir_del fdir_ret;
651 0 : rcfg->del_fltr.vsi_id = rcfg->vsi;
652 0 : rcfg->del_fltr.flow_id = rcfg->flow_id;
653 :
654 : ret =
655 0 : ctx->vc_op (ctx->vc_hdl, VIRTCHNL_ADV_OP_DEL_FDIR_FILTER, &rcfg->del_fltr,
656 : sizeof (rcfg->del_fltr), &fdir_ret, sizeof (fdir_ret));
657 :
658 0 : if (ret != 0)
659 : {
660 0 : return ret;
661 : }
662 :
663 0 : if (fdir_ret.status != VIRTCHNL_FDIR_SUCCESS)
664 : {
665 0 : ret = -fdir_ret.status;
666 : }
667 :
668 0 : return ret;
669 : }
670 :
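   : /* Walk the action list and fill the rule's action set.  At most one
   :  * destination action (passthru/drop/queue/rss) and one mark action are
   :  * accepted; a mark-only rule gets an implicit passthru appended. */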
671 : int
672 0 : avf_fdir_parse_action (const struct avf_flow_action actions[],
673 : struct avf_fdir_conf *rcfg,
674 : struct avf_flow_error *error)
675 : {
676 0 : int act_idx = 0, ret = 0;
677 0 : u32 dest_num = 0;
678 0 : u32 mark_num = 0;
679 : u32 act_num;
680 : struct virtchnl_filter_action *filter_action;
681 : const struct avf_flow_action_queue *act_q;
682 : const struct avf_flow_action_mark *act_msk;
683 :
684 0 : struct virtchnl_fdir_rule *rule_cfg = &rcfg->add_fltr.rule_cfg;
685 :
686 0 : for (; actions->type != AVF_FLOW_ACTION_TYPE_END; actions++, act_idx++)
687 : {
688 0 : switch (actions->type)
689 : {
690 0 : case AVF_FLOW_ACTION_TYPE_PASSTHRU:
691 0 : dest_num++;
692 0 : filter_action = &rule_cfg->action_set.actions[act_idx];
693 0 : filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
694 0 : rule_cfg->action_set.count++;
695 0 : break;
696 :
697 0 : case AVF_FLOW_ACTION_TYPE_DROP:
698 0 : dest_num++;
699 0 : ret = avf_fdir_rcfg_act_drop (rcfg, act_idx);
700 0 : if (ret)
701 0 : return ret;
702 :
703 0 : rule_cfg->action_set.count++;
704 0 : break;
705 :
706 0 : case AVF_FLOW_ACTION_TYPE_QUEUE:
707 0 : dest_num++;
708 0 : act_q = actions->conf;
709 :
710 0 : if (act_q->index >= rcfg->nb_rx_queues)
711 : {
712 0 : ret = avf_flow_error_set (error, AVF_FAILURE,
713 : AVF_FLOW_ERROR_TYPE_ACTION, actions,
714 : "Invalid queue for FDIR.");
715 0 : return -AVF_FAILURE;
716 : }
717 :
718 0 : ret = avf_fdir_rcfg_act_queue (rcfg, act_q->index, 1, act_idx);
719 0 : if (ret)
720 0 : return ret;
721 :
722 0 : rule_cfg->action_set.count++;
723 0 : break;
724 :
725 0 : case AVF_FLOW_ACTION_TYPE_RSS:
726 0 : dest_num++;
727 0 : filter_action = &rule_cfg->action_set.actions[act_idx];
728 0 : ret = avf_fdir_parse_action_qregion (rcfg, actions, act_idx, error);
729 0 : if (ret)
730 0 : return ret;
731 :
732 0 : rule_cfg->action_set.count++;
733 0 : break;
734 :
735 0 : case AVF_FLOW_ACTION_TYPE_MARK:
736 0 : mark_num++;
737 0 : act_msk = actions->conf;
738 0 : rcfg->mark_flag = 1;
739 :
740 0 : ret = avf_fdir_rcfg_act_mark (rcfg, act_msk->id, act_idx);
741 0 : if (ret)
742 0 : return ret;
743 :
744 0 : rule_cfg->action_set.count++;
745 0 : break;
746 :
747 0 : default:
748 : ret =
749 0 : avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
750 : actions, "Invalid action.");
751 0 : return ret;
752 : }
753 : }
754 :
755 0 : if (dest_num >= 2)
756 : {
757 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
758 : actions, "Unsupported action combination");
759 0 : return ret;
760 : }
761 :
762 0 : if (mark_num >= 2)
763 : {
764 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
765 : actions, "Too many mark actions");
766 0 : return ret;
767 : }
768 :
769 0 : if (dest_num + mark_num == 0)
770 : {
771 0 : ret = avf_flow_error_set (error, AVF_FAILURE, AVF_FLOW_ERROR_TYPE_ACTION,
772 : actions, "Empty action");
773 0 : return ret;
774 : }
775 :
776 : /* A mark-only rule is implemented as mark + passthru. */
777 0 : act_num = rule_cfg->action_set.count;
778 0 : if (dest_num == 0)
779 : {
780 0 : filter_action = &rule_cfg->action_set.actions[act_num];
781 0 : filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
782 0 : rule_cfg->action_set.count = ++act_num;
783 : }
784 :
785 0 : return ret;
786 : }
787 :
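   : /* Build a raw-packet rule from a generic pattern item: the spec and
   :  * mask strings are parsed into packet and mask bytes (two input
   :  * characters per byte, hence pkt_len = spec_len / 2) and copied into
   :  * the raw protocol header. */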
788 : int
789 0 : avf_fdir_parse_generic_pattern (struct avf_fdir_conf *rcfg,
790 : struct avf_flow_item avf_items[],
791 : struct avf_flow_error *error)
792 : {
793 0 : struct avf_flow_item *item = avf_items;
794 : u8 *pkt_buf, *msk_buf;
795 : u16 spec_len, pkt_len;
796 :
797 0 : spec_len = clib_strnlen (item->spec, VIRTCHNL_MAX_SIZE_GEN_PACKET);
798 0 : pkt_len = spec_len / 2;
799 :
800 0 : pkt_buf = clib_mem_alloc (pkt_len);
801 0 : msk_buf = clib_mem_alloc (pkt_len);
802 :
803 0 : avf_parse_generic_pattern (item, pkt_buf, msk_buf, spec_len);
804 :
805 0 : clib_memcpy (rcfg->add_fltr.rule_cfg.proto_hdrs.raw.spec, pkt_buf, pkt_len);
806 0 : clib_memcpy (rcfg->add_fltr.rule_cfg.proto_hdrs.raw.mask, msk_buf, pkt_len);
807 :
808 0 : rcfg->add_fltr.rule_cfg.proto_hdrs.count = 0;
809 0 : rcfg->add_fltr.rule_cfg.proto_hdrs.tunnel_level = 0;
810 0 : rcfg->add_fltr.rule_cfg.proto_hdrs.raw.pkt_len = pkt_len;
811 :
812 0 : clib_mem_free (pkt_buf);
813 0 : clib_mem_free (msk_buf);
814 :
815 0 : return 0;
816 : }
817 :
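   : /* Walk the pattern items and convert each one into a protocol header
   :  * layer of the rule configuration. */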
818 : int
819 0 : avf_fdir_parse_pattern (struct avf_fdir_conf *rcfg,
820 : struct avf_flow_item avf_items[],
821 : struct avf_flow_error *error)
822 : {
823 0 : int layer = 0;
824 0 : int ret = 0;
825 : struct avf_flow_item *item;
826 :
827 0 : for (item = avf_items; item->type != AVF_FLOW_ITEM_TYPE_END; item++)
828 : {
829 0 : ret = avf_fdir_rcfg_set_field (rcfg, layer, item, error);
830 0 : if (ret)
831 0 : return ret;
832 :
833 0 : rcfg->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
834 : }
835 :
836 0 : return ret;
837 : }
838 :
839 : int
840 0 : avf_flow_error_set (struct avf_flow_error *error, int code,
841 : enum avf_flow_error_type type, const void *cause,
842 : const char *message)
843 : {
844 0 : if (error)
845 : {
846 0 : *error = (struct avf_flow_error){
847 : .type = type,
848 : .cause = cause,
849 : .message = message,
850 : };
851 : }
852 :
853 0 : return code;
854 : }
855 :
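   : /* Map a (negated) virtchnl_fdir_prgm_status code back to a
   :  * human-readable message. */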
856 : char *
857 0 : avf_fdir_prgm_error_decode (int err_no)
858 : {
859 : enum virtchnl_fdir_prgm_status status;
860 0 : char *s = NULL;
861 :
862 0 : err_no = -err_no;
863 :
864 0 : if (err_no >= VIRTCHNL_FDIR_FAILURE_MAX)
865 0 : return "Failed to program the rule due to other reasons";
866 :
867 0 : status = (enum virtchnl_fdir_prgm_status) err_no;
868 0 : switch (status)
869 : {
870 0 : case VIRTCHNL_FDIR_SUCCESS:
871 0 : s = "Succeed in programming rule request by PF";
872 0 : break;
873 0 : case VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE:
874 0 : s = "Failed to add rule request due to no hardware resource";
875 0 : break;
876 0 : case VIRTCHNL_FDIR_FAILURE_RULE_EXIST:
877 0 : s = "Failed to add rule request due to the rule is already existed";
878 0 : break;
879 0 : case VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT:
880 0 : s = "Failed to add rule request due to the rule is conflict with "
881 : "existing rule";
882 0 : break;
883 0 : case VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST:
884 0 : s = "Failed to delete rule request due to this rule doesn't exist";
885 0 : break;
886 0 : case VIRTCHNL_FDIR_FAILURE_RULE_INVALID:
887 0 : s = "Failed to add rule request due to the hardware doesn't support";
888 0 : break;
889 0 : case VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT:
890 0 : s = "Failed to add rule request due to time out for programming";
891 0 : break;
892 0 : case VIRTCHNL_FDIR_FAILURE_QUERY_INVALID:
893 0 : s = "Succeed in programming rule request by PF";
894 0 : break;
895 0 : default:
896 0 : s = "Failed to program the rule due to other reasons";
897 0 : break;
898 : }
899 :
900 0 : return s;
901 : }
902 :
903 : /*
904 : * fd.io coding-style-patch-verification: ON
905 : *
906 : * Local Variables:
907 : * eval: (c-set-style "gnu")
908 : * End:
909 : */