/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/l2/l2_rw.h>
#include <vnet/classify/vnet_classify.h>

/**
 * @file
 * @brief Layer 2 Rewrite.
 *
 * The Layer 2 Rewrite node uses classify tables to match packets, then
 * modifies the packet header using the provisioned mask and value.
 */
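
/*
 * Rewrite semantics, in brief: for each selected 16-byte vector of the
 * packet header, the node computes
 *
 *   dst = (dst & ~mask) | value
 *
 * so only the bits set in the mask are overwritten. As an illustrative
 * sketch (the byte values are arbitrary, not taken from this file), a
 * mask whose first six bytes are 0xff paired with a value of
 * aa:bb:cc:dd:ee:ff would replace the destination MAC address of an
 * Ethernet header while leaving the remaining bytes untouched.
 */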

#ifndef CLIB_MARCH_VARIANT
l2_rw_main_t l2_rw_main;
#endif /* CLIB_MARCH_VARIANT */

typedef struct
{
  u32 sw_if_index;
  u32 classify_table_index;
  u32 rewrite_entry_index;
} l2_rw_trace_t;

static u8 *
format_l2_rw_entry (u8 * s, va_list * args)
{
  l2_rw_entry_t *e = va_arg (*args, l2_rw_entry_t *);
  l2_rw_main_t *rw = &l2_rw_main;
  s = format (s, "%d - mask:%U value:%U\n",
              e - rw->entries,
              format_hex_bytes, e->mask,
              e->rewrite_n_vectors * sizeof (u32x4), format_hex_bytes,
              e->value, e->rewrite_n_vectors * sizeof (u32x4));
  s =
    format (s, " hits:%d skip_bytes:%d", e->hit_count,
            e->skip_n_vectors * sizeof (u32x4));
  return s;
}

static u8 *
format_l2_rw_config (u8 * s, va_list * args)
{
  l2_rw_config_t *c = va_arg (*args, l2_rw_config_t *);
  return format (s, "table-index:%d miss-index:%d",
                 c->table_index, c->miss_index);
}

/* packet trace format function */
static u8 *
format_l2_rw_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2_rw_trace_t *t = va_arg (*args, l2_rw_trace_t *);
  return format (s, "l2-rw: sw_if_index %d, table %d, entry %d",
                 t->sw_if_index, t->classify_table_index,
                 t->rewrite_entry_index);
}

always_inline l2_rw_config_t *
l2_rw_get_config (u32 sw_if_index)
{
  l2_rw_main_t *rw = &l2_rw_main;
  if (PREDICT_FALSE (!clib_bitmap_get (rw->configs_bitmap, sw_if_index)))
    {
      vec_validate (rw->configs, sw_if_index);
      rw->configs[sw_if_index].table_index = ~0;
      rw->configs[sw_if_index].miss_index = ~0;
      rw->configs_bitmap =
        clib_bitmap_set (rw->configs_bitmap, sw_if_index, 1);
    }
  return &rw->configs[sw_if_index];
}

static_always_inline void
l2_rw_rewrite (l2_rw_entry_t * rwe, u8 * h)
{
  u32x4u *d = ((u32x4u *) h) + rwe->skip_n_vectors;
  switch (rwe->rewrite_n_vectors)
    {
    case 5:
      d[4] = (d[4] & ~rwe->mask[4]) | rwe->value[4];
      /* FALLTHROUGH */
    case 4:
      d[3] = (d[3] & ~rwe->mask[3]) | rwe->value[3];
      /* FALLTHROUGH */
    case 3:
      d[2] = (d[2] & ~rwe->mask[2]) | rwe->value[2];
      /* FALLTHROUGH */
    case 2:
      d[1] = (d[1] & ~rwe->mask[1]) | rwe->value[1];
      /* FALLTHROUGH */
    case 1:
      d[0] = (d[0] & ~rwe->mask[0]) | rwe->value[0];
      break;
    default:
      abort ();
    }
}
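
/*
 * Worked example (hypothetical numbers): with skip = 20 bytes,
 * l2_rw_mod_entry below stores skip_n_vectors = 1 (one whole 16-byte
 * vector skipped) and folds the remaining 4 bytes into the mask/value
 * layout, so the masked write above starts at byte 16 of the header
 * and the first 4 bytes of mask[0] are zero.
 */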
117 :
118 2236 : VLIB_NODE_FN (l2_rw_node) (vlib_main_t * vm,
119 : vlib_node_runtime_t * node, vlib_frame_t * frame)
120 : {
121 0 : l2_rw_main_t *rw = &l2_rw_main;
122 : u32 n_left_from, *from, *to_next, next_index;
123 0 : vnet_classify_main_t *vcm = &vnet_classify_main;
124 0 : f64 now = vlib_time_now (vlib_get_main ());
125 :
126 0 : from = vlib_frame_vector_args (frame);
127 0 : n_left_from = frame->n_vectors; /* number of packets to process */
128 0 : next_index = node->cached_next_index;
129 :
130 0 : while (n_left_from > 0)
131 : {
132 : u32 n_left_to_next;
133 :
134 : /* get space to enqueue frame to graph node "next_index" */
135 0 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
136 :
137 0 : while (n_left_from >= 6 && n_left_to_next >= 2)
138 : {
139 : u32 bi0, next0, sw_if_index0, rwe_index0;
140 : u32 bi1, next1, sw_if_index1, rwe_index1;
141 : vlib_buffer_t *b0, *b1;
142 : ethernet_header_t *h0, *h1;
143 : l2_rw_config_t *config0, *config1;
144 : u64 hash0, hash1;
145 : vnet_classify_table_t *t0, *t1;
146 : vnet_classify_entry_t *e0, *e1;
147 : l2_rw_entry_t *rwe0, *rwe1;
148 :
149 : {
150 : vlib_buffer_t *p2, *p3, *p4, *p5;
151 0 : p2 = vlib_get_buffer (vm, from[2]);
152 0 : p3 = vlib_get_buffer (vm, from[3]);
153 0 : p4 = vlib_get_buffer (vm, from[4]);
154 0 : p5 = vlib_get_buffer (vm, from[5]);
155 :
156 0 : vlib_prefetch_buffer_header (p4, LOAD);
157 0 : vlib_prefetch_buffer_header (p5, LOAD);
158 0 : vlib_prefetch_buffer_data (p2, LOAD);
159 0 : vlib_prefetch_buffer_data (p3, LOAD);
160 : }
161 :
162 0 : bi0 = from[0];
163 0 : bi1 = from[1];
164 0 : to_next[0] = bi0;
165 0 : to_next[1] = bi1;
166 0 : from += 2;
167 0 : to_next += 2;
168 0 : n_left_from -= 2;
169 0 : n_left_to_next -= 2;
170 :
171 0 : b0 = vlib_get_buffer (vm, bi0);
172 0 : b1 = vlib_get_buffer (vm, bi1);
173 0 : h0 = vlib_buffer_get_current (b0);
174 0 : h1 = vlib_buffer_get_current (b1);
175 :
176 0 : sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
177 0 : sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
          config0 = l2_rw_get_config (sw_if_index0);	/* TODO: check sw_if_index0 value */
          config1 = l2_rw_get_config (sw_if_index1);	/* TODO: check sw_if_index1 value */
          t0 = pool_elt_at_index (vcm->tables, config0->table_index);
          t1 = pool_elt_at_index (vcm->tables, config1->table_index);

          hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
          hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
          e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
          e1 = vnet_classify_find_entry (t1, (u8 *) h1, hash1, now);

          while (!e0 && (t0->next_table_index != ~0))
            {
              t0 = pool_elt_at_index (vcm->tables, t0->next_table_index);
              hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
              e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
            }

          while (!e1 && (t1->next_table_index != ~0))
            {
              t1 = pool_elt_at_index (vcm->tables, t1->next_table_index);
              hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
              e1 = vnet_classify_find_entry (t1, (u8 *) h1, hash1, now);
            }

          rwe_index0 = e0 ? e0->opaque_index : config0->miss_index;
          rwe_index1 = e1 ? e1->opaque_index : config1->miss_index;

          if (rwe_index0 != ~0)
            {
              rwe0 = pool_elt_at_index (rw->entries, rwe_index0);
              l2_rw_rewrite (rwe0, (u8 *) h0);
            }
          if (rwe_index1 != ~0)
            {
              rwe1 = pool_elt_at_index (rw->entries, rwe_index1);
              l2_rw_rewrite (rwe1, (u8 *) h1);
            }

          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              l2_rw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->classify_table_index = config0->table_index;
              t->rewrite_entry_index = rwe_index0;
            }

          if (PREDICT_FALSE ((b1->flags & VLIB_BUFFER_IS_TRACED)))
            {
              l2_rw_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
              t->sw_if_index = sw_if_index1;
              t->classify_table_index = config1->table_index;
              t->rewrite_entry_index = rwe_index1;
            }

          /* Update feature bitmap and get next feature index */
          next0 = vnet_l2_feature_next (b0, rw->feat_next_node_index,
                                        L2INPUT_FEAT_RW);
          next1 = vnet_l2_feature_next (b1, rw->feat_next_node_index,
                                        L2INPUT_FEAT_RW);

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0, sw_if_index0, rwe_index0;
          vlib_buffer_t *b0;
          ethernet_header_t *h0;
          l2_rw_config_t *config0;
          u64 hash0;
          vnet_classify_table_t *t0;
          vnet_classify_entry_t *e0;
          l2_rw_entry_t *rwe0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          config0 = l2_rw_get_config (sw_if_index0);	/* TODO: check sw_if_index0 value */
          t0 = pool_elt_at_index (vcm->tables, config0->table_index);

          hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
          e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);

          while (!e0 && (t0->next_table_index != ~0))
            {
              t0 = pool_elt_at_index (vcm->tables, t0->next_table_index);
              hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
              e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
            }

          rwe_index0 = e0 ? e0->opaque_index : config0->miss_index;

          if (rwe_index0 != ~0)
            {
              rwe0 = pool_elt_at_index (rw->entries, rwe_index0);
              l2_rw_rewrite (rwe0, (u8 *) h0);
            }

          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              l2_rw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->classify_table_index = config0->table_index;
              t->rewrite_entry_index = rwe_index0;
            }

          /* Update feature bitmap and get next feature index */
          next0 = vnet_l2_feature_next (b0, rw->feat_next_node_index,
                                        L2INPUT_FEAT_RW);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

#ifndef CLIB_MARCH_VARIANT
int
l2_rw_mod_entry (u32 * index,
                 u8 * mask, u8 * value, u32 len, u32 skip, u8 is_del)
{
  l2_rw_main_t *rw = &l2_rw_main;
  l2_rw_entry_t *e = 0;
  if (*index != ~0)
    {
      if (pool_is_free_index (rw->entries, *index))
        {
          return -1;
        }
      e = pool_elt_at_index (rw->entries, *index);
    }
  else
    {
      pool_get (rw->entries, e);
      *index = e - rw->entries;
    }

  if (is_del)
    {
      pool_put (rw->entries, e);
      return 0;
    }

  /* Split 'skip' into whole 16-byte vectors and a residual byte offset,
   * then size the rewrite region to cover the residual offset plus 'len'. */
  e->skip_n_vectors = skip / sizeof (u32x4);
  skip -= e->skip_n_vectors * sizeof (u32x4);
  e->rewrite_n_vectors = (skip + len - 1) / sizeof (u32x4) + 1;
  vec_alloc_aligned (e->mask, e->rewrite_n_vectors, sizeof (u32x4));
  clib_memset (e->mask, 0, e->rewrite_n_vectors * sizeof (u32x4));
  vec_alloc_aligned (e->value, e->rewrite_n_vectors, sizeof (u32x4));
  clib_memset (e->value, 0, e->rewrite_n_vectors * sizeof (u32x4));

  clib_memcpy (((u8 *) e->value) + skip, value, len);
  clib_memcpy (((u8 *) e->mask) + skip, mask, len);

  /* Bits not set in the mask can never be written by l2_rw_rewrite. */
  int i;
  for (i = 0; i < e->rewrite_n_vectors; i++)
    {
      e->value[i] &= e->mask[i];
    }

  return 0;
}
#endif /* CLIB_MARCH_VARIANT */
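
/*
 * Illustrative usage sketch (not called from this file; the byte values
 * are arbitrary): allocate a new entry that overwrites the destination
 * MAC address, i.e. the first 6 bytes of the Ethernet header:
 *
 *   u32 index = ~0;
 *   u8 mask[6]  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *   u8 value[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
 *   l2_rw_mod_entry (&index, mask, value, sizeof (mask), 0, 0);
 *
 * On success, 'index' identifies the entry; it can then be used as a
 * classify session's opaque_index, or as the miss_index passed to
 * l2_rw_interface_set_table() below.
 */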

static clib_error_t *
l2_rw_entry_cli_fn (vlib_main_t * vm,
                    unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 index = ~0;
  u8 *mask = 0;
  u8 *value = 0;
  u32 skip = 0;
  u8 del = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "index %d", &index))
        ;
      else if (unformat (input, "mask %U", unformat_hex_string, &mask))
        ;
      else if (unformat (input, "value %U", unformat_hex_string, &value))
        ;
      else if (unformat (input, "skip %d", &skip))
        ;
      else if (unformat (input, "del"))
        del = 1;
      else
        break;
    }

  if (!mask || !value)
    return clib_error_return (0, "Unspecified mask or value");

  if (vec_len (mask) != vec_len (value))
    return clib_error_return (0, "Mask and value lengths must be identical");

  int ret;
  if ((ret =
       l2_rw_mod_entry (&index, mask, value, vec_len (mask), skip, del)))
    return clib_error_return (0, "Could not add entry");

  return 0;
}

/*?
 * The Layer 2 Rewrite node uses classify tables to match packets, then
 * modifies the packet header using the provisioned mask and value.
 *
 * @cliexpar
 * @todo This is incomplete. This needs a detailed description.
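 *
 * Illustrative example (the hex strings are arbitrary placeholders):
 * match on a classify session and overwrite the source MAC, i.e. bytes
 * 6-11 of the Ethernet header:
 * @cliexcmd{l2 rewrite entry mask 000000000000ffffffffffff value 000000000000aabbccddeeff}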
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (l2_rw_entry_cli, static) = {
  .path = "l2 rewrite entry",
  .short_help =
  "l2 rewrite entry [index <index>] [mask <hex-mask>] [value <hex-value>] [skip <n_bytes>] [del]",
  .function = l2_rw_entry_cli_fn,
};
/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT
int
l2_rw_interface_set_table (u32 sw_if_index, u32 table_index, u32 miss_index)
{
  l2_rw_config_t *c = l2_rw_get_config (sw_if_index);
  l2_rw_main_t *rw = &l2_rw_main;

  c->table_index = table_index;
  c->miss_index = miss_index;
  u32 feature_bitmap = (table_index == ~0) ? 0 : L2INPUT_FEAT_RW;

  l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_RW, feature_bitmap);

  if (c->table_index == ~0)
    clib_bitmap_set (rw->configs_bitmap, sw_if_index, 0);

  return 0;
}
#endif /* CLIB_MARCH_VARIANT */
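
/*
 * Typical wiring (an illustrative sketch; table and entry indices are
 * placeholders): create a classify table whose sessions carry an l2_rw
 * entry index in their opaque_index, then bind it to an interface:
 *
 *   u32 rwe_index = ~0;
 *   l2_rw_mod_entry (&rwe_index, mask, value, len, 0, 0);
 *   ... create classify table 'table_index' and add sessions whose
 *       opaque_index is rwe_index ...
 *   l2_rw_interface_set_table (sw_if_index, table_index, ~0);
 *
 * Passing table_index == ~0 disables the rewrite feature on the
 * interface.
 */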

static clib_error_t *
l2_rw_interface_cli_fn (vlib_main_t * vm,
                        unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 table_index = ~0;
  u32 sw_if_index = ~0;
  u32 miss_index = ~0;

  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index);
    }

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "table %d", &table_index))
        ;
      else if (unformat (input, "miss-index %d", &miss_index))
        ;
      else
        break;
    }

  if (sw_if_index == ~0)
    return clib_error_return (0, "You must specify an interface");

  int ret;
  if ((ret =
       l2_rw_interface_set_table (sw_if_index, table_index, miss_index)))
    return clib_error_return (0, "l2_rw_interface_set_table returned %d",
                              ret);

  return 0;
}

/*?
 * The Layer 2 Rewrite node uses classify tables to match packets, then
 * modifies the packet header using the provisioned mask and value.
 *
 * @cliexpar
 * @todo This is incomplete. This needs a detailed description.
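 *
 * Illustrative example (interface name and indices are placeholders):
 * @cliexcmd{set interface l2 rewrite GigabitEthernet0/8/0 table 5 miss-index 1}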
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (l2_rw_interface_cli, static) = {
  .path = "set interface l2 rewrite",
  .short_help =
  "set interface l2 rewrite <interface> [table <table index>] [miss-index <entry-index>]",
  .function = l2_rw_interface_cli_fn,
};
/* *INDENT-ON* */

static clib_error_t *
l2_rw_show_interfaces_cli_fn (vlib_main_t * vm,
                              unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  l2_rw_main_t *rw = &l2_rw_main;
  if (clib_bitmap_count_set_bits (rw->configs_bitmap) == 0)
    vlib_cli_output (vm, "No interface is currently using l2 rewrite\n");

  uword i;
  /* *INDENT-OFF* */
  clib_bitmap_foreach (i, rw->configs_bitmap) {
    vlib_cli_output (vm, "sw_if_index:%d %U\n", i, format_l2_rw_config, &rw->configs[i]);
  }
  /* *INDENT-ON* */
  return 0;
}

/*?
 * The Layer 2 Rewrite node uses classify tables to match packets, then
 * modifies the packet header using the provisioned mask and value.
 *
 * @cliexpar
 * @todo This is incomplete. This needs a detailed description.
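 *
 * Illustrative example (output values are placeholders, following the
 * format strings used by this file):
 * @cliexstart{show l2 rewrite interfaces}
 * sw_if_index:1 table-index:5 miss-index:1
 * @cliexend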
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (l2_rw_show_interfaces_cli, static) = {
  .path = "show l2 rewrite interfaces",
  .short_help =
  "show l2 rewrite interfaces",
  .function = l2_rw_show_interfaces_cli_fn,
};
/* *INDENT-ON* */

static clib_error_t *
l2_rw_show_entries_cli_fn (vlib_main_t * vm,
                           unformat_input_t * input, vlib_cli_command_t * cmd)
{
  l2_rw_main_t *rw = &l2_rw_main;
  l2_rw_entry_t *e;
  if (pool_elts (rw->entries) == 0)
    vlib_cli_output (vm, "No entries\n");

  /* *INDENT-OFF* */
  pool_foreach (e, rw->entries) {
    vlib_cli_output (vm, "%U\n", format_l2_rw_entry, e);
  }
  /* *INDENT-ON* */
  return 0;
}

/*?
 * The Layer 2 Rewrite node uses classify tables to match packets, then
 * modifies the packet header using the provisioned mask and value.
 *
 * @cliexpar
 * @todo This is incomplete. This needs a detailed description.
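 *
 * Illustrative example (entry values are placeholders, following the
 * format strings used by this file):
 * @cliexstart{show l2 rewrite entries}
 * 0 - mask:000000000000ffffffffffff00000000 value:000000000000aabbccddeeff00000000
 *  hits:0 skip_bytes:0
 * @cliexend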
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (l2_rw_show_entries_cli, static) = {
  .path = "show l2 rewrite entries",
  .short_help =
  "show l2 rewrite entries",
  .function = l2_rw_show_entries_cli_fn,
};
/* *INDENT-ON* */

static int
l2_rw_enable_disable (u32 bridge_domain, u8 disable)
{
  u32 mask = L2INPUT_FEAT_RW;
  l2input_set_bridge_features (bridge_domain, mask, disable ? 0 : mask);
  return 0;
}

static clib_error_t *
l2_rw_set_cli_fn (vlib_main_t * vm,
                  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  u32 bridge_domain;
  u8 disable = 0;

  if (unformat_check_input (input) == UNFORMAT_END_OF_INPUT ||
      !unformat (input, "%d", &bridge_domain))
    {
      return clib_error_return (0, "You must specify a bridge domain");
    }

  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT &&
      unformat (input, "disable"))
    {
      disable = 1;
    }

  if (l2_rw_enable_disable (bridge_domain, disable))
    return clib_error_return (0, "Could not enable or disable rewrite");

  return 0;
}

/*?
 * The Layer 2 Rewrite node uses classify tables to match packets, then
 * modifies the packet header using the provisioned mask and value.
 *
 * @cliexpar
 * @todo This is incomplete. This needs a detailed description.
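 *
 * Illustrative example (the bridge-domain id is a placeholder):
 * @cliexcmd{set bridge-domain rewrite 200}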
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (l2_rw_set_cli, static) = {
  .path = "set bridge-domain rewrite",
  .short_help =
  "set bridge-domain rewrite <bridge-domain> [disable]",
  .function = l2_rw_set_cli_fn,
};
/* *INDENT-ON* */

static clib_error_t *
l2_rw_init (vlib_main_t * vm)
{
  l2_rw_main_t *rw = &l2_rw_main;
  rw->configs = 0;
  rw->entries = 0;
  clib_bitmap_alloc (rw->configs_bitmap, 1);
  feat_bitmap_init_next_nodes (vm,
                               l2_rw_node.index,
                               L2INPUT_N_FEAT,
                               l2input_get_feat_names (),
                               rw->feat_next_node_index);
  return 0;
}

VLIB_INIT_FUNCTION (l2_rw_init);

enum
{
  L2_RW_NEXT_DROP,
  L2_RW_N_NEXT,
};

#define foreach_l2_rw_error \
_(UNKNOWN, "Unknown error")

typedef enum
{
#define _(sym,str) L2_RW_ERROR_##sym,
  foreach_l2_rw_error
#undef _
    L2_RW_N_ERROR,
} l2_rw_error_t;

static char *l2_rw_error_strings[] = {
#define _(sym,string) string,
  foreach_l2_rw_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2_rw_node) = {
  .name = "l2-rw",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_rw_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(l2_rw_error_strings),
  .error_strings = l2_rw_error_strings,
  .runtime_data_bytes = 0,
  .n_next_nodes = L2_RW_N_NEXT,
  .next_nodes = { [L2_RW_NEXT_DROP] = "error-drop"},
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */