Line data Source code
1 : /*
2 : * Copyright (c) 2016 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #include <vnet/dpo/load_balance.h>
17 : #include <vnet/dpo/load_balance_map.h>
18 : #include <vnet/dpo/drop_dpo.h>
19 : #include <vppinfra/math.h> /* for fabs */
20 : #include <vnet/adj/adj.h>
21 : #include <vnet/adj/adj_internal.h>
22 : #include <vnet/fib/fib_urpf_list.h>
23 : #include <vnet/bier/bier_fwd.h>
24 : #include <vnet/fib/mpls_fib.h>
25 : #include <vnet/ip/ip4_inlines.h>
26 : #include <vnet/ip/ip6_inlines.h>
27 :
28 : // clang-format off
29 :
30 : /*
31 : * distribution error tolerance for load-balancing
32 : */
33 : const f64 multipath_next_hop_error_tolerance = 0.1;
34 :
35 : static const char *load_balance_attr_names[] = LOAD_BALANCE_ATTR_NAMES;
36 :
37 : /**
38 : * the logger
39 : */
40 : vlib_log_class_t load_balance_logger;
41 :
/**
 * Debug-log a message prefixed with the formatted load-balance.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe in un-braced if/else bodies (the original
 * bare-brace form left a stray ';' statement after expansion).
 */
#define LB_DBG(_lb, _fmt, _args...)                                     \
do {                                                                    \
    vlib_log_debug(load_balance_logger,                                 \
                   "lb:[%U]:" _fmt,                                     \
                   format_load_balance, load_balance_get_index(_lb),    \
                   LOAD_BALANCE_FORMAT_NONE,                            \
                   ##_args);                                            \
} while (0)
50 :
51 : /**
52 : * Pool of all DPOs. It's not static so the DP can have fast access
53 : */
54 : load_balance_t *load_balance_pool;
55 :
56 : /**
57 : * The one instance of load-balance main
58 : */
59 : load_balance_main_t load_balance_main = {
60 : .lbm_to_counters = {
61 : .name = "route-to",
62 : .stat_segment_name = "/net/route/to",
63 : },
64 : .lbm_via_counters = {
65 : .name = "route-via",
66 : .stat_segment_name = "/net/route/via",
67 : }
68 : };
69 :
70 : f64
71 0 : load_balance_get_multipath_tolerance (void)
72 : {
73 0 : return (multipath_next_hop_error_tolerance);
74 : }
75 :
76 : static inline index_t
77 380833 : load_balance_get_index (const load_balance_t *lb)
78 : {
79 380833 : return (lb - load_balance_pool);
80 : }
81 :
82 : static inline dpo_id_t*
83 125429 : load_balance_get_buckets (load_balance_t *lb)
84 : {
85 125429 : if (LB_HAS_INLINE_BUCKETS(lb))
86 : {
87 125109 : return (lb->lb_buckets_inline);
88 : }
89 : else
90 : {
91 320 : return (lb->lb_buckets);
92 : }
93 : }
94 :
/**
 * Allocate and zero-initialise a load-balance object from the pool, and
 * ensure the to/via combined-counter vectors cover its index.
 * Main-thread only (asserted). The worker barrier is taken only when the
 * pool or a counter vector must grow, so workers never observe a
 * reallocation in progress.
 */
static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;
    u8 need_barrier_sync = 0;
    vlib_main_t *vm = vlib_get_main();
    ASSERT (vm->thread_index == 0);

    /* growing the pool moves every element: workers must be stopped first */
    need_barrier_sync = pool_get_will_expand (load_balance_pool);

    if (need_barrier_sync)
        vlib_worker_thread_barrier_sync (vm);

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    clib_memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;

    /* if we did not already sync, check whether validating the counters for
     * this (possibly new-highest) index would reallocate them - if so, sync */
    if (need_barrier_sync == 0)
    {
        need_barrier_sync += vlib_validate_combined_counter_will_expand
            (&(load_balance_main.lbm_to_counters),
             load_balance_get_index(lb));
        need_barrier_sync += vlib_validate_combined_counter_will_expand
            (&(load_balance_main.lbm_via_counters),
             load_balance_get_index(lb));
        if (need_barrier_sync)
            vlib_worker_thread_barrier_sync (vm);
    }

    /* size the counter vectors to the new index and zero its counters */
    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    if (need_barrier_sync)
        vlib_worker_thread_barrier_release (vm);

    return (lb);
}
140 :
/**
 * Render a load-balance, its counters, flags, map and all of its buckets
 * into the supplied format string.
 * @param lbi     index of the LB to format
 * @param flags   formatting detail flags (currently unreferenced in the body)
 * @param indent  base indent for the per-bucket lines
 * @param s       string to append to
 * @return the extended string
 */
static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    if (lb->lb_flags)
    {
        load_balance_attr_t attr;

        /* print the name of each attribute bit that is set */
        s = format(s, "flags:[");

        FOR_EACH_LOAD_BALANCE_ATTR(attr)
        {
            if (lb->lb_flags & (1 << attr))
            {
                s = format (s, "%s", load_balance_attr_names[attr]);
            }
        }
        s = format(s, "] ");
    }
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    /* via counters are only shown if any via traffic was seen */
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    /* one line per bucket, showing the stacked child DPO */
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}
200 :
201 : u8*
202 0 : format_load_balance (u8 * s, va_list * args)
203 : {
204 0 : index_t lbi = va_arg(*args, index_t);
205 0 : load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);
206 :
207 0 : return (load_balance_format(lbi, flags, 0, s));
208 : }
209 :
210 : static u8*
211 9869 : format_load_balance_dpo (u8 * s, va_list * args)
212 : {
213 9869 : index_t lbi = va_arg(*args, index_t);
214 9869 : u32 indent = va_arg(*args, u32);
215 :
216 9869 : return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
217 : }
218 :
219 : flow_hash_config_t
220 2878 : load_balance_get_default_flow_hash (dpo_proto_t lb_proto)
221 : {
222 2878 : switch (lb_proto)
223 : {
224 147 : case DPO_PROTO_IP4:
225 : case DPO_PROTO_IP6:
226 147 : return (IP_FLOW_HASH_DEFAULT);
227 :
228 149 : case DPO_PROTO_MPLS:
229 149 : return (MPLS_FLOW_HASH_DEFAULT);
230 :
231 2582 : case DPO_PROTO_ETHERNET:
232 : case DPO_PROTO_BIER:
233 : case DPO_PROTO_NSH:
234 2582 : break;
235 : }
236 :
237 2582 : return (0);
238 : }
239 :
240 : static load_balance_t *
241 44439 : load_balance_create_i (u32 num_buckets,
242 : dpo_proto_t lb_proto,
243 : flow_hash_config_t fhc)
244 : {
245 : load_balance_t *lb;
246 :
247 44439 : ASSERT (num_buckets <= LB_MAX_BUCKETS);
248 :
249 44439 : lb = load_balance_alloc_i();
250 44439 : lb->lb_hash_config = fhc;
251 44439 : lb->lb_n_buckets = num_buckets;
252 44439 : lb->lb_n_buckets_minus_1 = num_buckets-1;
253 44439 : lb->lb_proto = lb_proto;
254 :
255 44439 : if (!LB_HAS_INLINE_BUCKETS(lb))
256 : {
257 25 : vec_validate_aligned(lb->lb_buckets,
258 : lb->lb_n_buckets - 1,
259 : CLIB_CACHE_LINE_BYTES);
260 : }
261 :
262 44439 : LB_DBG(lb, "create");
263 :
264 44439 : return (lb);
265 : }
266 :
267 : index_t
268 44439 : load_balance_create (u32 n_buckets,
269 : dpo_proto_t lb_proto,
270 : flow_hash_config_t fhc)
271 : {
272 44439 : return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
273 : }
274 :
/**
 * Stack the given child DPO into the chosen bucket slot, wiring its
 * next-node against the load-balance's graph node for this protocol.
 */
static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}
283 :
284 : void
285 1193 : load_balance_set_bucket (index_t lbi,
286 : u32 bucket,
287 : const dpo_id_t *next)
288 : {
289 : load_balance_t *lb;
290 : dpo_id_t *buckets;
291 :
292 1193 : lb = load_balance_get(lbi);
293 1193 : buckets = load_balance_get_buckets(lb);
294 :
295 1193 : ASSERT(bucket < lb->lb_n_buckets);
296 :
297 1193 : load_balance_set_bucket_i(lb, bucket, buckets, next);
298 1193 : }
299 :
300 : int
301 6663 : load_balance_is_drop (const dpo_id_t *dpo)
302 : {
303 : load_balance_t *lb;
304 :
305 6663 : if (DPO_LOAD_BALANCE != dpo->dpoi_type)
306 1022 : return (0);
307 :
308 5641 : lb = load_balance_get(dpo->dpoi_index);
309 :
310 5641 : if (1 == lb->lb_n_buckets)
311 : {
312 5474 : return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
313 : }
314 167 : return (0);
315 : }
316 :
317 : u16
318 2878 : load_balance_n_buckets (index_t lbi)
319 : {
320 : load_balance_t *lb;
321 :
322 2878 : lb = load_balance_get(lbi);
323 :
324 2878 : return (lb->lb_n_buckets);
325 : }
326 :
327 : void
328 77857 : load_balance_set_fib_entry_flags (index_t lbi,
329 : fib_entry_flag_t flags)
330 : {
331 : load_balance_t *lb;
332 :
333 77857 : lb = load_balance_get(lbi);
334 77857 : lb->lb_fib_entry_flags = flags;
335 77857 : }
336 :
337 :
/**
 * Attach a uRPF list to the LB, releasing the reference on the previous one.
 * The swap of lb_urpf is a single word write, so in-flight packets see either
 * the old or the new list.
 * NOTE(review): the old list is unlocked before the new one is locked;
 * presumably callers never pass urpf == lb->lb_urpf while holding the only
 * lock - confirm, otherwise the list could be freed then re-locked.
 */
void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * packets in flight we see this change. but it's atomic, so :P
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}
356 :
357 : index_t
358 17 : load_balance_get_urpf (index_t lbi)
359 : {
360 : load_balance_t *lb;
361 :
362 17 : lb = load_balance_get(lbi);
363 :
364 17 : return (lb->lb_urpf);
365 : }
366 :
367 : const dpo_id_t *
368 2904 : load_balance_get_bucket (index_t lbi,
369 : u32 bucket)
370 : {
371 : load_balance_t *lb;
372 :
373 2904 : lb = load_balance_get(lbi);
374 :
375 2904 : return (load_balance_get_bucket_i(lb, bucket));
376 : }
377 :
378 : static int
379 82374 : next_hop_sort_by_weight (const load_balance_path_t * n1,
380 : const load_balance_path_t * n2)
381 : {
382 82374 : return ((int) n1->path_weight - (int) n2->path_weight);
383 : }
384 :
385 : /* Given next hop vector is over-written with normalized one with sorted weights and
386 : with weights corresponding to the number of adjacencies for each next hop.
387 : Returns number of adjacencies in block. */
/**
 * Normalise a raw path-list into one whose weights each give the number of
 * buckets that path occupies; the normalised weights then sum to the
 * returned bucket count (grown in powers of 2, capped at LB_MAX_BUCKETS).
 * @param raw_next_hops        the caller's paths (not modified)
 * @param normalized_next_hops in/out: reusable result vector
 * @param sum_weight_in        out: sum of the original (pre-scaling) weights
 * @param multipath_next_hop_error_tolerance  max mean per-bucket error
 * @return the number of buckets (== sum of the normalised weights)
 */
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        vec_set_len (nhs, 1);
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            vec_set_len (nhs, 2);
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy_fast (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where the bucket distribution is within the tolerance of
       the specified weights. */
    for (n_adj = clib_min(max_pow2 (n_nhs), LB_MAX_BUCKETS); ; n_adj *= 2)
    {
        ASSERT (n_adj <= LB_MAX_BUCKETS);
        error = 0;

        /* scale factor from configured weight to bucket count */
        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * when the weight skew is high (norm is small) and n == nf.
                 * without this correction the path with a low weight would have
                 * no representation in the load-balanace - don't want that.
                 * If the weight skew is high so the load-balance has many buckets
                 * to allow it. pays ya money takes ya choice.
                 */
                error = n_adj;
                break;
            }
        }

        /* give any rounding leftover to the first (heaviest) path */
        nhs[0].path_weight += n_adj_left;

        /* Mean error per bucket within tolerance with this size adjacency
         * block, or did we reach the maximum number of buckets we support? */
        if (error <= multipath_next_hop_error_tolerance*n_adj ||
            n_adj >= LB_MAX_BUCKETS)
        {
            if (i < n_nhs)
            {
                /* Truncate any next hops in excess */
                vlib_log_err(load_balance_logger,
                             "Too many paths for load-balance, truncating %d -> %d",
                             n_nhs, i);
                for (int j = i; j < n_nhs; j++)
                    dpo_reset (&vec_elt(nhs, j).path_dpo);
            }
            vec_set_len (nhs, i);
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}
518 :
519 : static load_balance_path_t *
520 81410 : load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
521 : dpo_proto_t drop_proto)
522 : {
523 81410 : if (0 == vec_len(nhs))
524 : {
525 23085 : load_balance_path_t *new_nhs = NULL, *nh;
526 :
527 : /*
528 : * we need something for the load-balance. so use the drop
529 : */
530 23085 : vec_add2(new_nhs, nh, 1);
531 :
532 23085 : nh->path_weight = 1;
533 23085 : dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
534 :
535 23085 : return (new_nhs);
536 : }
537 :
538 58325 : return (NULL);
539 : }
540 :
541 : /*
542 : * Fill in adjacencies in block based on corresponding
543 : * next hop adjacencies.
544 : */
545 : static void
546 81410 : load_balance_fill_buckets_norm (load_balance_t *lb,
547 : load_balance_path_t *nhs,
548 : dpo_id_t *buckets,
549 : u32 n_buckets)
550 : {
551 : load_balance_path_t *nh;
552 : u16 ii, bucket;
553 :
554 81410 : bucket = 0;
555 :
556 : /*
557 : * the next-hops have normalised weights. that means their sum is the number
558 : * of buckets we need to fill.
559 : */
560 179080 : vec_foreach (nh, nhs)
561 : {
562 202545 : for (ii = 0; ii < nh->path_weight; ii++)
563 : {
564 104875 : ASSERT(bucket < n_buckets);
565 104875 : load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
566 : }
567 : }
568 81410 : }
/**
 * Fill the bucket array for a 'sticky' LB: buckets whose own path is down
 * (i.e. a drop) are re-pointed at the still-forwarding paths in round-robin
 * order, so flows hashed to surviving paths do not move when some other
 * path fails.
 */
static void
load_balance_fill_buckets_sticky (load_balance_t *lb,
                                  load_balance_path_t *nhs,
                                  dpo_id_t *buckets,
                                  u32 n_buckets)
{
    load_balance_path_t *nh, *fwding_paths;
    u16 ii, bucket, fpath;

    fpath = bucket = 0;
    fwding_paths = NULL;

    /* collect the subset of paths that can still forward */
    vec_foreach (nh, nhs)
    {
        if (!dpo_is_drop(&nh->path_dpo))
        {
            vec_add1(fwding_paths, *nh);
        }
    }
    /* every path is down: fall back to the full list (i.e. keep dropping) */
    if (vec_len(fwding_paths) == 0)
        fwding_paths = vec_dup(nhs);

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            if (!dpo_is_drop(&nh->path_dpo))
            {
                load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
            }
            else
            {
                /* fill the bucks from the next up path */
                load_balance_set_bucket_i(lb, bucket++, buckets, &fwding_paths[fpath].path_dpo);
                ASSERT(vec_len(fwding_paths) > 0);
                fpath = (fpath + 1) % vec_len(fwding_paths);
            }
        }
    }

    vec_free(fwding_paths);
}
616 :
617 : static void
618 81410 : load_balance_fill_buckets (load_balance_t *lb,
619 : load_balance_path_t *nhs,
620 : dpo_id_t *buckets,
621 : u32 n_buckets,
622 : load_balance_flags_t flags)
623 : {
624 81410 : if (flags & LOAD_BALANCE_FLAG_STICKY)
625 : {
626 0 : load_balance_fill_buckets_sticky(lb, nhs, buckets, n_buckets);
627 : }
628 : else
629 : {
630 81410 : load_balance_fill_buckets_norm(lb, nhs, buckets, n_buckets);
631 : }
632 81410 : }
633 :
634 : static inline void
635 42590 : load_balance_set_n_buckets (load_balance_t *lb,
636 : u32 n_buckets)
637 : {
638 42590 : ASSERT (n_buckets <= LB_MAX_BUCKETS);
639 42590 : lb->lb_n_buckets = n_buckets;
640 42590 : lb->lb_n_buckets_minus_1 = n_buckets-1;
641 42590 : }
642 :
/**
 * (Re)build a load-balance from a raw path-list.
 * The paths are normalised (weights -> bucket counts), the buckets filled,
 * and any load-balance map swapped. For an in-place modification the
 * bucket array, the bucket count and the map are updated in an order (with
 * memory barriers) chosen so that data-path threads reading
 * lb_n_buckets_minus_1 never index past valid, stacked buckets.
 */
void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    lb->lb_flags = flags;
    /* substitute a drop path if the caller gave no paths at all */
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets, flags);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets inflight see a consistent state, that
         * is the number of reported buckets the LB has (read from
         * lb_n_buckets_minus_1) is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array first,
         * then the reported number. vice-versa if the number of buckets goes down.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets, flags);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets, flags);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                /* the inline buckets are no longer reachable: unstack them */
                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline buckets.
                     * we can write the new on the old..
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets, flags);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets,
                                              n_buckets, flags);
                    CLIB_MEMORY_BARRIER();
                    /* publish the new array before the larger count */
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();


            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (and this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets, flags);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs, buckets,
                                          n_buckets, flags);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    /* drop the working references; the buckets hold their own */
    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}
875 :
876 : static void
877 127694 : load_balance_lock (dpo_id_t *dpo)
878 : {
879 : load_balance_t *lb;
880 :
881 127694 : lb = load_balance_get(dpo->dpoi_index);
882 :
883 127694 : lb->lb_locks++;
884 127694 : }
885 :
886 : static void
887 33237 : load_balance_destroy (load_balance_t *lb)
888 : {
889 : dpo_id_t *buckets;
890 : int i;
891 :
892 33237 : buckets = load_balance_get_buckets(lb);
893 :
894 84187 : for (i = 0; i < lb->lb_n_buckets; i++)
895 : {
896 50950 : dpo_reset(&buckets[i]);
897 : }
898 :
899 33237 : LB_DBG(lb, "destroy");
900 33237 : if (!LB_HAS_INLINE_BUCKETS(lb))
901 : {
902 48 : vec_free(lb->lb_buckets);
903 : }
904 :
905 33237 : fib_urpf_list_unlock(lb->lb_urpf);
906 33237 : load_balance_map_unlock(lb->lb_map);
907 :
908 33237 : pool_put(load_balance_pool, lb);
909 33237 : }
910 :
911 : static void
912 116871 : load_balance_unlock (dpo_id_t *dpo)
913 : {
914 : load_balance_t *lb;
915 :
916 116871 : lb = load_balance_get(dpo->dpoi_index);
917 :
918 116871 : lb->lb_locks--;
919 :
920 116871 : if (0 == lb->lb_locks)
921 : {
922 33237 : load_balance_destroy(lb);
923 : }
924 116871 : }
925 :
926 : static void
927 0 : load_balance_mem_show (void)
928 : {
929 0 : fib_show_memory_usage("load-balance",
930 0 : pool_elts(load_balance_pool),
931 0 : pool_len(load_balance_pool),
932 : sizeof(load_balance_t));
933 0 : load_balance_map_show_mem();
934 0 : }
935 :
936 : static u16
937 7 : load_balance_dpo_get_mtu (const dpo_id_t *dpo)
938 : {
939 : const dpo_id_t *buckets;
940 : load_balance_t *lb;
941 7 : u16 i, mtu = 0xffff;
942 :
943 7 : lb = load_balance_get(dpo->dpoi_index);
944 7 : buckets = load_balance_get_buckets(lb);
945 :
946 14 : for (i = 0; i < lb->lb_n_buckets; i++)
947 : {
948 7 : mtu = clib_min (mtu, dpo_get_mtu (&buckets[i]));
949 : }
950 :
951 7 : return (mtu);
952 : }
953 :
/**
 * Virtual function table registered for load-balance DPOs
 */
const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
    .dv_get_mtu = load_balance_dpo_get_mtu,
};
961 :
962 : /**
963 : * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
964 : * object.
965 : *
966 : * this means that these graph nodes are ones from which a load-balance is the
967 : * parent object in the DPO-graph.
968 : *
969 : * We do not list all the load-balance nodes, such as the *-lookup. instead
970 : * we are relying on the correct use of the .sibling_of field when setting
971 : * up these sibling nodes.
972 : */
/* per-payload-protocol graph node lists; each is NULL-terminated */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL
};
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
/* the table handed to dpo_register(), indexed by dpo_proto_t */
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
    [DPO_PROTO_BIER] = load_balance_bier_nodes,
};
1012 :
1013 : void
1014 559 : load_balance_module_init (void)
1015 : {
1016 : index_t lbi;
1017 :
1018 559 : dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);
1019 :
1020 : /*
1021 : * Special LB with index zero. we need to define this since the v4 mtrie
1022 : * assumes an index of 0 implies the ply is empty. therefore all 'real'
1023 : * adjs need a non-zero index.
1024 : * This should never be used, but just in case, stack it on a drop.
1025 : */
1026 559 : lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
1027 559 : load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));
1028 :
1029 559 : load_balance_logger =
1030 559 : vlib_log_register_class("dpo", "load-balance");
1031 :
1032 559 : load_balance_map_module_init();
1033 559 : }
1034 :
1035 : static clib_error_t *
1036 0 : load_balance_show (vlib_main_t * vm,
1037 : unformat_input_t * input,
1038 : vlib_cli_command_t * cmd)
1039 : {
1040 0 : index_t lbi = INDEX_INVALID;
1041 :
1042 0 : while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1043 : {
1044 0 : if (unformat (input, "%d", &lbi))
1045 : ;
1046 : else
1047 0 : break;
1048 : }
1049 :
1050 0 : if (INDEX_INVALID != lbi)
1051 : {
1052 0 : if (pool_is_free_index(load_balance_pool, lbi))
1053 : {
1054 0 : vlib_cli_output (vm, "no such load-balance:%d", lbi);
1055 : }
1056 : else
1057 : {
1058 0 : vlib_cli_output (vm, "%U", format_load_balance, lbi,
1059 : LOAD_BALANCE_FORMAT_DETAIL);
1060 : }
1061 : }
1062 : else
1063 : {
1064 : load_balance_t *lb;
1065 :
1066 0 : pool_foreach (lb, load_balance_pool)
1067 : {
1068 0 : vlib_cli_output (vm, "%U", format_load_balance,
1069 : load_balance_get_index(lb),
1070 : LOAD_BALANCE_FORMAT_NONE);
1071 : }
1072 : }
1073 :
1074 0 : return 0;
1075 : }
1076 :
/* CLI registration for "show load-balance [<index>]" */
VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
1082 :
1083 :
1084 : always_inline u32
1085 206 : ip_flow_hash (void *data)
1086 : {
1087 206 : ip4_header_t *iph = (ip4_header_t *) data;
1088 :
1089 206 : if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
1090 206 : return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
1091 : else
1092 0 : return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
1093 : }
1094 :
/**
 * Load a 6-byte MAC address into the low 48 bits of a u64.
 * NOTE(review): this performs an 8-byte load from a 6-byte field, reading
 * 2 bytes beyond the address - presumably safe because the MACs hashed here
 * live inside larger packet buffers; confirm. The mask selecting the first
 * 6 bytes also assumes a little-endian host.
 */
always_inline u64
mac_to_u64 (u8 * m)
{
    return (*((u64 *) m) & 0xffffffffffff);
}
1100 :
/**
 * Compute a flow hash for an L2 frame: for IP payloads, mix the IP flow
 * hash with the src/dst MACs; for anything else, mix the ethertype with
 * the MACs.
 */
always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
    ethernet_header_t *eh;
    u64 a, b, c;
    uword is_ip, eh_size;
    u16 eh_type;

    eh = vlib_buffer_get_current (b0);
    eh_type = clib_net_to_host_u16 (eh->type);
    eh_size = ethernet_buffer_header_size (b0);

    is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

    /* since we have 2 cache lines, use them */
    if (is_ip)
        a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
    else
        /* NOTE(review): mixes the network-order type field (not the
         * host-order eh_type) into the hash - harmless for hashing */
        a = eh->type;

    b = mac_to_u64 ((u8 *) eh->dst_address);
    c = mac_to_u64 ((u8 *) eh->src_address);
    hash_mix64 (a, b, c);

    return (u32) c;
}
1127 :
/**
 * Trace payload recorded by the l2-, nsh- and bier- load-balance nodes.
 */
typedef struct load_balance_trace_t_
{
  /** index of the load-balance object the packet was hashed through */
  index_t lb_index;
} load_balance_trace_t;
1132 :
1133 : always_inline uword
1134 11 : load_balance_inline (vlib_main_t * vm,
1135 : vlib_node_runtime_t * node,
1136 : vlib_frame_t * frame,
1137 : int is_l2)
1138 : {
1139 : u32 n_left_from, next_index, *from, *to_next;
1140 :
1141 11 : from = vlib_frame_vector_args (frame);
1142 11 : n_left_from = frame->n_vectors;
1143 :
1144 11 : next_index = node->cached_next_index;
1145 :
1146 22 : while (n_left_from > 0)
1147 : {
1148 : u32 n_left_to_next;
1149 :
1150 11 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1151 :
1152 354 : while (n_left_from > 0 && n_left_to_next > 0)
1153 : {
1154 : vlib_buffer_t *b0;
1155 : u32 bi0, lbi0, next0;
1156 : const dpo_id_t *dpo0;
1157 : const load_balance_t *lb0;
1158 :
1159 343 : bi0 = from[0];
1160 343 : to_next[0] = bi0;
1161 343 : from += 1;
1162 343 : to_next += 1;
1163 343 : n_left_from -= 1;
1164 343 : n_left_to_next -= 1;
1165 :
1166 343 : b0 = vlib_get_buffer (vm, bi0);
1167 :
1168 : /* lookup dst + src mac */
1169 343 : lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
1170 343 : lb0 = load_balance_get(lbi0);
1171 :
1172 343 : if (is_l2)
1173 : {
1174 206 : vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
1175 : }
1176 : else
1177 : {
1178 : /* it's BIER */
1179 137 : const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
1180 137 : vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
1181 : }
1182 :
1183 343 : dpo0 = load_balance_get_bucket_i(lb0,
1184 343 : vnet_buffer(b0)->ip.flow_hash &
1185 343 : (lb0->lb_n_buckets_minus_1));
1186 :
1187 343 : next0 = dpo0->dpoi_next_node;
1188 343 : vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1189 :
1190 343 : if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1191 : {
1192 343 : load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
1193 : sizeof (*tr));
1194 343 : tr->lb_index = lbi0;
1195 : }
1196 343 : vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1197 : n_left_to_next, bi0, next0);
1198 : }
1199 :
1200 11 : vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1201 : }
1202 :
1203 11 : return frame->n_vectors;
1204 : }
1205 :
1206 : static uword
1207 6 : l2_load_balance (vlib_main_t * vm,
1208 : vlib_node_runtime_t * node,
1209 : vlib_frame_t * frame)
1210 : {
1211 6 : return (load_balance_inline(vm, node, frame, 1));
1212 : }
1213 :
1214 : static u8 *
1215 222 : format_l2_load_balance_trace (u8 * s, va_list * args)
1216 : {
1217 222 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1218 222 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1219 222 : load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1220 :
1221 222 : s = format (s, "L2-load-balance: index %d", t->lb_index);
1222 222 : return s;
1223 : }
1224 :
/**
 * @brief L2 load-balance graph node registration.
 *
 * The only statically-declared next node is error-drop; forwarding
 * arcs are taken at runtime from the selected DPO's dpoi_next_node.
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
  .function = l2_load_balance,
  .name = "l2-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_l2_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};
1239 :
1240 : static uword
1241 0 : nsh_load_balance (vlib_main_t * vm,
1242 : vlib_node_runtime_t * node,
1243 : vlib_frame_t * frame)
1244 : {
1245 : u32 n_left_from, next_index, *from, *to_next;
1246 :
1247 0 : from = vlib_frame_vector_args (frame);
1248 0 : n_left_from = frame->n_vectors;
1249 :
1250 0 : next_index = node->cached_next_index;
1251 :
1252 0 : while (n_left_from > 0)
1253 : {
1254 : u32 n_left_to_next;
1255 :
1256 0 : vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1257 :
1258 0 : while (n_left_from > 0 && n_left_to_next > 0)
1259 : {
1260 : vlib_buffer_t *b0;
1261 : u32 bi0, lbi0, next0, *nsh0;
1262 : const dpo_id_t *dpo0;
1263 : const load_balance_t *lb0;
1264 :
1265 0 : bi0 = from[0];
1266 0 : to_next[0] = bi0;
1267 0 : from += 1;
1268 0 : to_next += 1;
1269 0 : n_left_from -= 1;
1270 0 : n_left_to_next -= 1;
1271 :
1272 0 : b0 = vlib_get_buffer (vm, bi0);
1273 :
1274 0 : lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
1275 0 : lb0 = load_balance_get(lbi0);
1276 :
1277 : /* SPI + SI are the second word of the NSH header */
1278 0 : nsh0 = vlib_buffer_get_current (b0);
1279 0 : vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;
1280 :
1281 0 : dpo0 = load_balance_get_bucket_i(lb0,
1282 0 : vnet_buffer(b0)->ip.flow_hash &
1283 0 : (lb0->lb_n_buckets_minus_1));
1284 :
1285 0 : next0 = dpo0->dpoi_next_node;
1286 0 : vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1287 :
1288 0 : if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1289 : {
1290 0 : load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
1291 : sizeof (*tr));
1292 0 : tr->lb_index = lbi0;
1293 : }
1294 0 : vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1295 : n_left_to_next, bi0, next0);
1296 : }
1297 :
1298 0 : vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1299 : }
1300 :
1301 0 : return frame->n_vectors;
1302 : }
1303 :
1304 : static u8 *
1305 0 : format_nsh_load_balance_trace (u8 * s, va_list * args)
1306 : {
1307 0 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1308 0 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1309 0 : load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1310 :
1311 0 : s = format (s, "NSH-load-balance: index %d", t->lb_index);
1312 0 : return s;
1313 : }
1314 :
/**
 * @brief NSH load-balance graph node registration.
 *
 * The only statically-declared next node is error-drop; forwarding
 * arcs are taken at runtime from the selected DPO's dpoi_next_node.
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
  .function = nsh_load_balance,
  .name = "nsh-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_nsh_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};
1329 :
1330 : static u8 *
1331 171 : format_bier_load_balance_trace (u8 * s, va_list * args)
1332 : {
1333 171 : CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1334 171 : CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1335 171 : load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1336 :
1337 171 : s = format (s, "BIER-load-balance: index %d", t->lb_index);
1338 171 : return s;
1339 : }
1340 :
1341 : static uword
1342 5 : bier_load_balance (vlib_main_t * vm,
1343 : vlib_node_runtime_t * node,
1344 : vlib_frame_t * frame)
1345 : {
1346 5 : return (load_balance_inline(vm, node, frame, 0));
1347 : }
1348 :
/**
 * @brief BIER load-balance graph node registration.
 *
 * Declares no next nodes of its own: it shares the next-node arcs of
 * "mpls-load-balance" via sibling_of.
 */
VLIB_REGISTER_NODE (bier_load_balance_node) = {
  .function = bier_load_balance,
  .name = "bier-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_bier_load_balance_trace,
  .sibling_of = "mpls-load-balance",
};
1360 :
1361 : // clang-format on
|