Line data Source code
1 : /*
2 : * Copyright (c) 2021 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #ifndef included_pnat_node_h
17 : #define included_pnat_node_h
18 :
19 : #include "pnat.h"
20 : #include <pnat/pnat.api_enum.h>
21 : #include <vnet/feature/feature.h>
22 : #include <vnet/udp/udp_packet.h>
23 : #include <vnet/tcp/tcp_packet.h>
24 : #include <vnet/ip/format.h>
25 :
/* PNAT next-nodes: the only explicit next is drop; on success the packet
 * continues along the configured feature arc (see vnet_feature_next_u16
 * in pnat_node_inline). */
typedef enum { PNAT_NEXT_DROP, PNAT_N_NEXT } pnat_next_t;
28 :
29 : u8 *format_pnat_match_tuple(u8 *s, va_list *args);
30 : u8 *format_pnat_rewrite_tuple(u8 *s, va_list *args);
31 16 : static inline u8 *format_pnat_trace(u8 *s, va_list *args) {
32 16 : CLIB_UNUSED(vlib_main_t * vm) = va_arg(*args, vlib_main_t *);
33 16 : CLIB_UNUSED(vlib_node_t * node) = va_arg(*args, vlib_node_t *);
34 16 : pnat_trace_t *t = va_arg(*args, pnat_trace_t *);
35 :
36 16 : s = format(s, "pnat: index %d\n", t->pool_index);
37 16 : if (t->pool_index != ~0) {
38 9 : s = format(s, " match: %U\n", format_pnat_match_tuple,
39 : &t->match);
40 9 : s = format(s, " rewrite: %U", format_pnat_rewrite_tuple,
41 : &t->rewrite);
42 : }
43 16 : return s;
44 : }
45 :
46 : /*
47 : * Given a packet and rewrite instructions from a translation modify packet.
48 : */
49 : // TODO: Generalize to write with mask
50 9 : static u32 pnat_rewrite_ip4(u32 pool_index, ip4_header_t *ip) {
51 9 : pnat_main_t *pm = &pnat_main;
52 9 : if (pool_is_free_index(pm->translations, pool_index))
53 0 : return PNAT_ERROR_REWRITE;
54 9 : pnat_translation_t *t = pool_elt_at_index(pm->translations, pool_index);
55 :
56 9 : ip_csum_t csumd = 0;
57 :
58 9 : if (t->instructions & PNAT_INSTR_DESTINATION_ADDRESS) {
59 4 : csumd = ip_csum_sub_even(csumd, ip->dst_address.as_u32);
60 4 : csumd = ip_csum_add_even(csumd, t->post_da.as_u32);
61 4 : ip->dst_address = t->post_da;
62 : }
63 9 : if (t->instructions & PNAT_INSTR_SOURCE_ADDRESS) {
64 4 : csumd = ip_csum_sub_even(csumd, ip->src_address.as_u32);
65 4 : csumd = ip_csum_add_even(csumd, t->post_sa.as_u32);
66 4 : ip->src_address = t->post_sa;
67 : }
68 :
69 9 : ip_csum_t csum = ip->checksum;
70 9 : csum = ip_csum_sub_even(csum, csumd);
71 9 : ip->checksum = ip_csum_fold(csum);
72 9 : if (ip->checksum == 0xffff)
73 0 : ip->checksum = 0;
74 9 : ASSERT(ip->checksum == ip4_header_checksum(ip));
75 :
76 9 : u16 plen = clib_net_to_host_u16(ip->length);
77 :
78 : /* Nothing more to do if this is a fragment. */
79 9 : if (ip4_is_fragment(ip))
80 0 : return PNAT_ERROR_NONE;
81 :
82 : /* L4 ports */
83 9 : if (ip->protocol == IP_PROTOCOL_TCP) {
84 : /* Assume IP4 header is 20 bytes */
85 0 : if (plen < sizeof(ip4_header_t) + sizeof(tcp_header_t))
86 0 : return PNAT_ERROR_TOOSHORT;
87 :
88 0 : tcp_header_t *tcp = ip4_next_header(ip);
89 0 : ip_csum_t l4csum = tcp->checksum;
90 0 : if (t->instructions & PNAT_INSTR_DESTINATION_PORT) {
91 0 : l4csum = ip_csum_sub_even(l4csum, tcp->dst_port);
92 0 : l4csum = ip_csum_add_even(l4csum, clib_net_to_host_u16(t->post_dp));
93 0 : tcp->dst_port = clib_net_to_host_u16(t->post_dp);
94 : }
95 0 : if (t->instructions & PNAT_INSTR_SOURCE_PORT) {
96 0 : l4csum = ip_csum_sub_even(l4csum, tcp->src_port);
97 0 : l4csum = ip_csum_add_even(l4csum, clib_net_to_host_u16(t->post_sp));
98 0 : tcp->src_port = clib_net_to_host_u16(t->post_sp);
99 : }
100 0 : l4csum = ip_csum_sub_even(l4csum, csumd);
101 0 : tcp->checksum = ip_csum_fold(l4csum);
102 9 : } else if (ip->protocol == IP_PROTOCOL_UDP) {
103 4 : if (plen < sizeof(ip4_header_t) + sizeof(udp_header_t))
104 0 : return PNAT_ERROR_TOOSHORT;
105 4 : udp_header_t *udp = ip4_next_header(ip);
106 4 : ip_csum_t l4csum = udp->checksum;
107 4 : if (t->instructions & PNAT_INSTR_DESTINATION_PORT) {
108 2 : l4csum = ip_csum_sub_even(l4csum, udp->dst_port);
109 2 : l4csum = ip_csum_add_even(l4csum, clib_net_to_host_u16(t->post_dp));
110 2 : udp->dst_port = clib_net_to_host_u16(t->post_dp);
111 : }
112 4 : if (t->instructions & PNAT_INSTR_SOURCE_PORT) {
113 0 : l4csum = ip_csum_sub_even(l4csum, udp->src_port);
114 0 : l4csum = ip_csum_add_even(l4csum, clib_net_to_host_u16(t->post_sp));
115 0 : udp->src_port = clib_net_to_host_u16(t->post_sp);
116 : }
117 4 : if (udp->checksum) {
118 3 : l4csum = ip_csum_sub_even(l4csum, csumd);
119 3 : udp->checksum = ip_csum_fold(l4csum);
120 : }
121 : }
122 9 : if (t->instructions & PNAT_INSTR_COPY_BYTE) {
123 : /* Copy byte from somewhere in packet to elsewhere */
124 :
125 0 : if (t->to_offset >= plen || t->from_offset > plen) {
126 0 : return PNAT_ERROR_TOOSHORT;
127 : }
128 0 : u8 *p = (u8 *)ip;
129 0 : p[t->to_offset] = p[t->from_offset];
130 0 : ip->checksum = ip4_header_checksum(ip);
131 : // TODO: L4 checksum
132 : }
133 9 : if (t->instructions & PNAT_INSTR_CLEAR_BYTE) {
134 : /* Clear byte at offset */
135 0 : u8 *p = (u8 *)ip;
136 0 : p[t->clear_offset] = 0;
137 0 : ip->checksum = ip4_header_checksum(ip);
138 : // TODO: L4 checksum
139 : }
140 :
141 9 : return PNAT_ERROR_NONE;
142 : }
143 :
/*
 * Lookup the packet tuple in the flow cache, given the lookup mask.
 * If a binding is found, rewrite the packet according to instructions,
 * otherwise follow configured default action (forward, punt or drop)
 *
 * Shared fast-path worker for the PNAT input/output feature nodes:
 * `attachment` selects which per-interface lookup mask applies and
 * `dir` (VLIB_RX/VLIB_TX) selects the sw_if_index and whether the IP
 * header sits past the rewrite string.  Returns the number of packets
 * processed (frame->n_vectors).
 */
// TODO: Make use of SVR configurable
static_always_inline uword pnat_node_inline(vlib_main_t *vm,
                                            vlib_node_runtime_t *node,
                                            vlib_frame_t *frame,
                                            pnat_attachment_point_t attachment,
                                            int dir) {
    pnat_main_t *pm = &pnat_main;
    u32 n_left_from, *from;
    /* Per-packet next-node indices for the enqueue at the end */
    u16 nexts[VLIB_FRAME_SIZE] = {0}, *next = nexts;
    /* Per-packet matched translation index (~0 = miss); used for tracing */
    u32 pool_indicies[VLIB_FRAME_SIZE], *pi = pool_indicies;
    vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
    clib_bihash_kv_16_8_t kv, value;
    ip4_header_t *ip0;

    from = vlib_frame_vector_args(frame);
    n_left_from = frame->n_vectors;
    vlib_get_buffers(vm, from, b, n_left_from);
    pnat_interface_t *interface;

    /* Stage 1: build vector of flow hash (based on lookup mask) */
    while (n_left_from > 0) {
        u32 sw_if_index0 = vnet_buffer(b[0])->sw_if_index[dir];
        /* NOTE(review): assumes ip.reass L4 port metadata was populated by
         * an earlier feature on this arc — confirm against arc ordering */
        u16 sport0 = vnet_buffer(b[0])->ip.reass.l4_src_port;
        u16 dport0 = vnet_buffer(b[0])->ip.reass.l4_dst_port;
        /* On the TX path the buffer points at the L2 rewrite; skip it to
         * reach the IP header */
        u32 iph_offset =
            dir == VLIB_TX ? vnet_buffer(b[0])->ip.save_rewrite_length : 0;
        ip0 = (ip4_header_t *)(vlib_buffer_get_current(b[0]) + iph_offset);
        interface = pnat_interface_by_sw_if_index(sw_if_index0);
        ASSERT(interface);
        pnat_mask_fast_t mask = interface->lookup_mask_fast[attachment];
        pnat_calc_key(sw_if_index0, attachment, ip0->src_address,
                      ip0->dst_address, ip0->protocol, sport0, dport0, mask,
                      &kv);
        /* By default pass packet to next node in the feature chain */
        vnet_feature_next_u16(next, b[0]);

        if (clib_bihash_search_16_8(&pm->flowhash, &kv, &value) == 0) {
            /* Cache hit: rewrite in place; drop on rewrite failure */
            *pi = value.value;
            u32 errno0 = pnat_rewrite_ip4(value.value, ip0);
            if (PREDICT_FALSE(errno0)) {
                next[0] = PNAT_NEXT_DROP;
                b[0]->error = node->errors[errno0];
            }
        } else {
            /* Cache miss: leave the feature-arc next selected above */
            *pi = ~0;
        }

        /* Advance all four per-packet cursors in lock-step */
        next += 1;
        n_left_from -= 1;
        b += 1;
        pi += 1;
    }

    /* Packet trace: record the matched translation (if any) per packet */
    if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE))) {
        u32 i;
        b = bufs;
        pi = pool_indicies;
        for (i = 0; i < frame->n_vectors; i++) {
            if (b[0]->flags & VLIB_BUFFER_IS_TRACED) {
                pnat_trace_t *t = vlib_add_trace(vm, node, b[0], sizeof(*t));
                if (*pi != ~0) {
                    /* Re-validate: the translation may have been removed */
                    if (!pool_is_free_index(pm->translations, *pi)) {
                        pnat_translation_t *tr =
                            pool_elt_at_index(pm->translations, *pi);
                        t->match = tr->match;
                        t->rewrite = tr->rewrite;
                    }
                }
                t->pool_index = *pi;
                b += 1;
                pi += 1;
            } else
                /* Traced buffers are contiguous at the front of the frame */
                break;
        }
    }

    vlib_buffer_enqueue_to_next(vm, node, from, nexts, frame->n_vectors);

    return frame->n_vectors;
}
233 : #endif
|