/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stdbool.h>
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>
#include <avf/avf_advanced_flow.h>

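/* Flow class predicates: collapse the many vnet flow types into the coarse
 * classes (ethernet / ipv4 / ipv6 / generic) that the parsing code below
 * dispatches on. */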
#define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_IPV4_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP4) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

#define FLOW_IS_IPV6_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP6) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

#define FLOW_IS_GENERIC_CLASS(f) (f->type == VNET_FLOW_TYPE_GENERIC)

/* check if flow is L3 type */
#define FLOW_IS_L3_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))

/* check if flow is L4 type */
#define FLOW_IS_L4_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_TYPE(f)                                             \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))

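/* Translate the vnet RSS type bitmap into the AVF RSS hash-field bitmap,
 * walking the foreach_avf_rss_hf x-macro table to map each set bit position
 * onto the corresponding AVF hash-field flag. */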
static inline void
avf_flow_convert_rss_types (u64 type, u64 *avf_rss_type)
{
#define BIT_IS_SET(v, b) ((v) & (u64) 1 << (b))

  *avf_rss_type = 0;

#undef _
#define _(n, f, s)                                                            \
  if (n != -1 && BIT_IS_SET (type, n))                                        \
    *avf_rss_type |= f;

  foreach_avf_rss_hf
#undef _
    return;
}

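/* Virtual channel callback: translate an advanced-flow opcode into an
 * add/delete request and program it on the device via avf_program_flow. */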
int
avf_flow_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
			 u32 in_len, void *out, u32 out_len)
{
  u32 dev_instance = *(u32 *) vc_hdl;
  avf_device_t *ad = avf_get_device (dev_instance);
  clib_error_t *err = 0;
  int is_add;

  if (vc_op >= VIRTCHNL_ADV_OP_MAX)
    {
      return -1;
    }

  switch (vc_op)
    {
    case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
    case VIRTCHNL_ADV_OP_ADD_RSS_CFG:
      is_add = 1;
      break;
    case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
    case VIRTCHNL_ADV_OP_DEL_RSS_CFG:
      is_add = 0;
      break;
    default:
      avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
		   (u32) vc_op);
      return -1;
    }

  err =
    avf_program_flow (dev_instance, is_add, vc_op, in, in_len, out, out_len);
  if (err != 0)
    {
      avf_log_err (ad, "avf flow program failed: %U", format_clib_error, err);
      clib_error_free (err);
      return -1;
    }

  avf_log_debug (ad, "avf flow program success");
  return 0;
}

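/* Map a vnet RSS hash function onto the matching AVF hash function;
 * returns AVF_ETH_HASH_FUNCTION_MAX for anything the AVF path cannot
 * express, which callers treat as "not supported". */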
static inline enum avf_eth_hash_function
avf_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum avf_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = AVF_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = AVF_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = AVF_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = AVF_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = AVF_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}

/** Maximum number of queue indices in struct avf_flow_action_rss. */
#define ACTION_RSS_QUEUE_NUM 128

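/* Expand a contiguous (queue_index, queue_num) range into the explicit
 * queue array that struct avf_flow_action_rss expects; the array is
 * allocated at the maximum size and owned by the action afterwards. */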
static inline void
avf_flow_convert_rss_queues (u32 queue_index, u32 queue_num,
			     struct avf_flow_action_rss *act_rss)
{
  u16 *queues = clib_mem_alloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
  int i;

  for (i = 0; i < queue_num; i++)
    queues[i] = queue_index++;

  act_rss->queue_num = queue_num;
  act_rss->queue = queues;

  return;
}

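/* Decode a generic (raw) flow pattern: spec and mask arrive as hex strings
 * and are converted into the byte arrays the FDIR parser consumes. */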
void
avf_parse_generic_pattern (struct avf_flow_item *item, u8 *pkt_buf,
			   u8 *msk_buf, u16 spec_len)
{
  u8 *raw_spec, *raw_mask;
  u8 tmp_val = 0;
  u8 tmp_c = 0;
  int i, j;

  raw_spec = (u8 *) item->spec;
  raw_mask = (u8 *) item->mask;

  /* convert the hex strings to byte arrays, two nibbles per output byte */
  for (i = 0, j = 0; i < spec_len; i += 2, j++)
    {
      /* high nibble of the spec byte */
      tmp_c = raw_spec[i];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	tmp_val = tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	tmp_val = tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	tmp_val = tmp_c - '0';

      /* low nibble of the spec byte */
      tmp_c = raw_spec[i + 1];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	pkt_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	pkt_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	pkt_buf[j] = tmp_val * 16 + tmp_c - '0';

      /* high nibble of the mask byte */
      tmp_c = raw_mask[i];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	tmp_val = tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	tmp_val = tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	tmp_val = tmp_c - '0';

      /* low nibble of the mask byte */
      tmp_c = raw_mask[i + 1];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	msk_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	msk_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	msk_buf[j] = tmp_val * 16 + tmp_c - '0';
    }
}

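/* Build an AVF pattern (item list) and action list from a vnet flow and
 * program it on the VF: flows carrying an RSS action with no explicit
 * queue range go through the virtchnl RSS config path, everything else
 * through the flow director (FDIR) path. */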
static int
avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
{
  avf_device_t *ad = avf_get_device (dev_instance);
  int rv = 0;
  int ret = 0;
  u16 src_port = 0, dst_port = 0;
  u16 src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  bool fate = false;
  bool is_fdir = true;
  struct avf_flow_error error;

  int layer = 0;
  int action_count = 0;

  struct avf_flow_vc_ctx vc_ctx;
  struct avf_fdir_conf *filter;
  struct virtchnl_rss_cfg *rss_cfg;
  struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
  struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];

  struct avf_ipv4_hdr ip4_spec = {}, ip4_mask = {};
  struct avf_ipv6_hdr ip6_spec = {}, ip6_mask = {};
  struct avf_tcp_hdr tcp_spec = {}, tcp_mask = {};
  struct avf_udp_hdr udp_spec = {}, udp_mask = {};
  struct avf_gtp_hdr gtp_spec = {}, gtp_mask = {};
  struct avf_l2tpv3oip_hdr l2tpv3_spec = {}, l2tpv3_mask = {};
  struct avf_esp_hdr esp_spec = {}, esp_mask = {};
  struct avf_ah_hdr ah_spec = {}, ah_mask = {};

  struct avf_flow_action_queue act_q = {};
  struct avf_flow_action_mark act_msk = {};
  struct avf_flow_action_rss act_rss = {};

  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
    FLOW_GENERIC_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;

  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else if (FLOW_IS_GENERIC_CLASS (f))
    flow_class = FLOW_GENERIC_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  ret = avf_rss_cfg_create (&rss_cfg, 0);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  /* init a virtual channel context */
  vc_ctx.vc_hdl = &dev_instance;
  vc_ctx.vc_op = avf_flow_vc_op_callback;

  clib_memset (avf_items, 0, sizeof (avf_items));
  clib_memset (avf_actions, 0, sizeof (avf_actions));

  /* Handle generic flow first */
  if (flow_class == FLOW_GENERIC_CLASS)
    {
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_RAW;
      avf_items[layer].is_generic = true;
      avf_items[layer].spec = f->generic.pattern.spec;
      avf_items[layer].mask = f->generic.pattern.mask;

      layer++;

      goto pattern_end;
    }

  /* Ethernet Layer */
  avf_items[layer].type = AVF_FLOW_ITEM_TYPE_ETH;
  avf_items[layer].spec = NULL;
  avf_items[layer].mask = NULL;
  layer++;

  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      /* IPv4 Layer */
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_IPV4;
      avf_items[layer].spec = &ip4_spec;
      avf_items[layer].mask = &ip4_mask;
      layer++;

      if (ip4_ptr->src_addr.mask.as_u32 || ip4_ptr->dst_addr.mask.as_u32 ||
	  ip4_ptr->protocol.mask)
	{
	  ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
	  ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;

	  ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
	  ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;

	  ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
	  ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

	  src_port = ip4_n_ptr->src_port.port;
	  dst_port = ip4_n_ptr->dst_port.port;
	  src_port_mask = ip4_n_ptr->src_port.mask;
	  dst_port_mask = ip4_n_ptr->dst_port.mask;
	}

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      /* IPv6 Layer */
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_IPV6;
      avf_items[layer].spec = &ip6_spec;
      avf_items[layer].mask = &ip6_mask;
      layer++;

      if (!ip6_address_is_zero (&ip6_ptr->src_addr.mask) ||
	  !ip6_address_is_zero (&ip6_ptr->dst_addr.mask) ||
	  ip6_ptr->protocol.mask)
	{
	  clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
		       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
	  clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
		       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
	  clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
		       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
	  clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
		       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
	  ip6_spec.proto = ip6_ptr->protocol.prot;
	  ip6_mask.proto = ip6_ptr->protocol.mask;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

	  src_port = ip6_n_ptr->src_port.port;
	  dst_port = ip6_n_ptr->dst_port.port;
	  src_port_mask = ip6_n_ptr->src_port.mask;
	  dst_port_mask = ip6_n_ptr->dst_port.mask;
	}

      protocol = ip6_ptr->protocol.prot;
    }

  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* Layer 4 */
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_L2TPV3OIP;
      avf_items[layer].spec = &l2tpv3_spec;
      avf_items[layer].mask = &l2tpv3_mask;
      layer++;

      vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
      l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
      l2tpv3_mask.session_id = ~0;
      break;

    case IP_PROTOCOL_IPSEC_ESP:
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_ESP;
      avf_items[layer].spec = &esp_spec;
      avf_items[layer].mask = &esp_mask;
      layer++;

      vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
      esp_spec.spi = clib_host_to_net_u32 (esph->spi);
      esp_mask.spi = ~0;
      break;

    case IP_PROTOCOL_IPSEC_AH:
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_AH;
      avf_items[layer].spec = &ah_spec;
      avf_items[layer].mask = &ah_mask;
      layer++;

      vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
      ah_spec.spi = clib_host_to_net_u32 (ah->spi);
      ah_mask.spi = ~0;
      break;

    case IP_PROTOCOL_TCP:
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_TCP;
      avf_items[layer].spec = &tcp_spec;
      avf_items[layer].mask = &tcp_mask;
      layer++;

      if (src_port_mask)
	{
	  tcp_spec.src_port = clib_host_to_net_u16 (src_port);
	  tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
	}
      if (dst_port_mask)
	{
	  tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
	  tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
	}
      break;

    case IP_PROTOCOL_UDP:
      avf_items[layer].type = AVF_FLOW_ITEM_TYPE_UDP;
      avf_items[layer].spec = &udp_spec;
      avf_items[layer].mask = &udp_mask;
      layer++;

      if (src_port_mask)
	{
	  udp_spec.src_port = clib_host_to_net_u16 (src_port);
	  udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
	}
      if (dst_port_mask)
	{
	  udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
	  udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
	}

      /* handle the UDP tunnels */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
	{
	  avf_items[layer].type = AVF_FLOW_ITEM_TYPE_GTPU;
	  avf_items[layer].spec = &gtp_spec;
	  avf_items[layer].mask = &gtp_mask;
	  layer++;

	  vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
	  gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
	  gtp_mask.teid = ~0;
	}
      break;

    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

pattern_end:
  /* pattern end flag */
  avf_items[layer].type = AVF_FLOW_ITEM_TYPE_END;

  /* Action */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      is_fdir = false;
      avf_actions[action_count].conf = &act_rss;
      avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_RSS;

      avf_flow_convert_rss_types (f->rss_types, &act_rss.types);

      if ((act_rss.func = avf_flow_convert_rss_func (f->rss_fun)) ==
	  AVF_ETH_HASH_FUNCTION_MAX)
	{
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      if (f->queue_num)
	{
	  /* convert rss queues to array */
	  avf_flow_convert_rss_queues (f->queue_index, f->queue_num,
				       &act_rss);
	  is_fdir = true;
	}

      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_QUEUE;
      avf_actions[action_count].conf = &act_q;

      act_q.index = f->redirect_queue;
      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;

      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_DROP;
      avf_actions[action_count].conf = NULL;

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
      action_count++;
    }

  if (fate == false)
    {
      avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_PASSTHRU;
      avf_actions[action_count].conf = NULL;

      fate = true;
      action_count++;
    }

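  /* MARK is not a fate action, so it may accompany any of the above. */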
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_MARK;
      avf_actions[action_count].conf = &act_msk;
      action_count++;

      act_msk.id = fe->mark;
    }

  /* action end flag */
  avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_END;

  /* parse pattern and actions */
  if (is_fdir)
    {
      if (flow_class == FLOW_GENERIC_CLASS)
	{
	  ret = avf_fdir_parse_generic_pattern (filter, avf_items, &error);
	  if (ret)
	    {
	      avf_log_err (ad, "avf fdir parse generic pattern failed: %s",
			   error.message);
	      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	      goto done;
	    }
	}
      else
	{
	  ret = avf_fdir_parse_pattern (filter, avf_items, &error);
	  if (ret)
	    {
	      avf_log_err (ad, "avf fdir parse pattern failed: %s",
			   error.message);
	      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	      goto done;
	    }
	}

      ret = avf_fdir_parse_action (avf_actions, filter, &error);
      if (ret)
	{
	  avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      /* create flow rule, save rule */
      ret = avf_fdir_rule_create (&vc_ctx, filter);

      if (ret)
	{
	  avf_log_err (ad, "avf fdir rule create failed: %s",
		       avf_fdir_prgm_error_decode (ret));
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	{
	  fe->rcfg = filter;
	  fe->flow_type_flag = 1;
	}
    }
  else
    {
      ret =
	avf_rss_parse_pattern_action (avf_items, avf_actions, rss_cfg,
				      &error);
      if (ret)
	{
	  avf_log_err (ad, "avf rss parse pattern action failed: %s",
		       error.message);
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}
      /* create flow rule, save rule */
      ret = avf_rss_rule_create (&vc_ctx, rss_cfg);

      if (ret)
	{
	  avf_log_err (ad, "avf rss rule create failed");
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	{
	  fe->rss_cfg = rss_cfg;
	  fe->flow_type_flag = 0;
	}
    }

done:

  return rv;
}

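/* vnet flow dev ops entry point: on ADD, allocate a flow entry (and a
 * lookup-table slot when the flow needs mark / redirect / buffer-advance
 * metadata) and program the rule; on DEL, tear the rule down and release
 * the associated state. */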
int
avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
		 u32 flow_index, uword *private_data)
{
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  avf_device_t *ad = avf_get_device (dev_instance);
  avf_flow_entry_t *fe = NULL;
  avf_flow_lookup_entry_t *fle = NULL;
  int rv = 0;

  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
    {
      pool_get (ad->flow_entries, fe);
      fe->flow_index = flow->index;

      /* if we need to mark packets, assign one mark */
      if (flow->actions &
	  (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
	   VNET_FLOW_ACTION_BUFFER_ADVANCE))
	{
	  /* reserve slot 0 */
	  if (ad->flow_lookup_entries == 0)
	    pool_get_aligned (ad->flow_lookup_entries, fle,
			      CLIB_CACHE_LINE_BYTES);
	  pool_get_aligned (ad->flow_lookup_entries, fle,
			    CLIB_CACHE_LINE_BYTES);
	  fe->mark = fle - ad->flow_lookup_entries;

	  /* install entry in the lookup table */
	  clib_memset (fle, -1, sizeof (*fle));
	  if (flow->actions & VNET_FLOW_ACTION_MARK)
	    fle->flow_id = flow->mark_flow_id;
	  if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	    fle->next_index = flow->redirect_device_input_next_index;
	  if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	    fle->buffer_advance = flow->buffer_advance;

	  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
	    {
	      ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
	    }
	}
      else
	fe->mark = 0;

      switch (flow->type)
	{
	case VNET_FLOW_TYPE_IP4:
	case VNET_FLOW_TYPE_IP6:
	case VNET_FLOW_TYPE_IP4_N_TUPLE:
	case VNET_FLOW_TYPE_IP6_N_TUPLE:
	case VNET_FLOW_TYPE_IP4_GTPU:
	case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
	case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
	case VNET_FLOW_TYPE_IP4_IPSEC_AH:
	case VNET_FLOW_TYPE_GENERIC:
	  if ((rv = avf_flow_add (dev_instance, flow, fe)))
	    goto done;
	  break;
	default:
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      *private_data = fe - ad->flow_entries;
    }
  else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (ad->flow_entries, *private_data);

      struct avf_flow_vc_ctx ctx;
      ctx.vc_hdl = &dev_instance;
      ctx.vc_op = avf_flow_vc_op_callback;

      if (fe->flow_type_flag)
	{
	  rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
	  if (rv)
	    return VNET_FLOW_ERROR_INTERNAL;
	}
      else
	{
	  rv = avf_rss_rule_destroy (&ctx, fe->rss_cfg);
	  if (rv)
	    return VNET_FLOW_ERROR_INTERNAL;
	}

      if (fe->mark)
	{
	  fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put_index (ad->flow_lookup_entries, fe->mark);
	}

      (void) avf_fdir_rcfg_destroy (fe->rcfg);
      (void) avf_rss_rcfg_destroy (fe->rss_cfg);
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (ad->flow_entries, fe);
      goto disable_rx_offload;
    }
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  if (rv)
    {
      if (fe)
	{
	  clib_memset (fe, 0, sizeof (*fe));
	  pool_put (ad->flow_entries, fe);
	}

      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (ad->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
      pool_elts (ad->flow_entries) == 0)
    {
      ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
    }

  return rv;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */