/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec.api_enum.h>

typedef struct
{
  union
  {
    u32 spi;
    u8 spi_bytes[4];
  };
  u32 seq;
  u8 data[0];
} esp_header_t;

typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

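/* Illustrative sketch (hypothetical helper, not part of the datapath): the
 * ESP trailer requires the ciphertext (payload + padding + the 2-byte footer
 * above) to be a multiple of the cipher block alignment (RFC 4303 section
 * 2.4). round_pow2 assumes the alignment is a power of 2, which holds for
 * the usual 4- and 16-byte cases. */
always_inline u8
esp_pad_length_example (u16 payload_len, u8 block_align)
{
  return round_pow2 (payload_len + sizeof (esp_footer_t), block_align) -
	 payload_len - sizeof (esp_footer_t);
}
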
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  udp_header_t udp;
  esp_header_t esp;
}) ip4_and_udp_and_esp_header_t;
/* *INDENT-ON* */

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */

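/* Illustrative sketch (hypothetical helper, not part of the datapath): the
 * packed overlay types above let the ESP fields be read in place once a
 * packet is known to start with an IPv4 header. All ESP fields are carried
 * in network byte order, hence the clib_net_to_host_u32 conversion. */
always_inline u32
esp4_spi_example (const ip4_and_esp_header_t *pkt)
{
  /* spi_bytes aliases the same four octets for byte-wise access */
  return clib_net_to_host_u32 (pkt->esp.spi);
}
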
/**
 * AES counter mode nonce
 */
typedef struct
{
  u32 salt;
  u64 iv;
  u32 ctr; /* counter: 1 in big-endian for ctr, unused for gcm */
} __clib_packed esp_ctr_nonce_t;

STATIC_ASSERT_SIZEOF (esp_ctr_nonce_t, 16);

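/* Illustrative sketch (hypothetical helper): how the nonce above is
 * typically assembled per packet. The salt is fixed for the SA's lifetime,
 * the IV must be unique per packet, and for plain AES-CTR the 32-bit block
 * counter starts at 1 in network byte order (RFC 3686); AES-GCM ignores the
 * ctr field. */
always_inline void
esp_ctr_nonce_fill_example (esp_ctr_nonce_t *nonce, u32 salt, u64 iv)
{
  nonce->salt = salt;	/* per-SA salt */
  nonce->iv = iv;	/* per-packet IV */
  nonce->ctr = clib_host_to_net_u32 (1);
}
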
/**
 * AES GCM Additional Authenticated Data (AAD)
 */
typedef struct esp_aead_t_
{
  /**
   * for GCM: when using ESN it's:
   *   SPI, seq-hi, seq-low
   * else
   *   SPI, seq-low
   */
  u32 data[3];
} __clib_packed esp_aead_t;

#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);

/* TODO: the seq increment should be atomic so the SA can safely be used by
   multiple workers */
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
	{
	  if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
			     sa->seq_hi == ESP_SEQ_MAX))
	    return 1;
	  sa->seq_hi++;
	}
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
			 sa->seq == ESP_SEQ_MAX))
	return 1;
      sa->seq++;
    }

  return 0;
}

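/* Illustrative sketch of the atomic update the TODO above asks for, reduced
 * to the non-ESN, no-anti-replay case. A complete fix would have to advance
 * seq and seq_hi together, e.g. as a single 64-bit value. Hypothetical
 * helper, not used by the datapath. */
always_inline u32
esp_seq_advance_atomic_example (ipsec_sa_t *sa)
{
  /* compiler builtin; returns the incremented value */
  return __atomic_add_fetch (&sa->seq, 1, __ATOMIC_RELAXED);
}
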
always_inline u16
esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
	      u32 seq_hi)
{
  esp_aead_t *aad;

  aad = (esp_aead_t *) data;
  aad->data[0] = esp->spi;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      /* SPI, seq-hi, seq-low */
      aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
      aad->data[2] = esp->seq;
      return 12;
    }
  else
    {
      /* SPI, seq-low */
      aad->data[1] = esp->seq;
      return 8;
    }
}

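/* Illustrative usage sketch (hypothetical helper): a caller reserves the
 * worst-case AAD size, sizeof (esp_aead_t), and hands the returned length to
 * the crypto layer; only the first 8 bytes are meaningful when ESN is not in
 * use. */
always_inline void
esp_aad_fill_example (vnet_crypto_op_t *op, u8 *aad_buf,
		      const esp_header_t *esp, const ipsec_sa_t *sa)
{
  op->aad = aad_buf;
  op->aad_len = esp_aad_fill (aad_buf, esp, sa, sa->seq_hi);
}
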
always_inline u32
esp_encrypt_err_to_sa_err (u32 err)
{
  switch (err)
    {
    case ESP_ENCRYPT_ERROR_HANDOFF:
      return IPSEC_SA_ERROR_HANDOFF;
    case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
      return IPSEC_SA_ERROR_SEQ_CYCLED;
    case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
    case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
      return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
    case ESP_ENCRYPT_ERROR_NO_BUFFERS:
      return IPSEC_SA_ERROR_NO_BUFFERS;
    case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
      return IPSEC_SA_ERROR_NO_ENCRYPTION;
    }
  return ~0;
}

always_inline u32
esp_decrypt_err_to_sa_err (u32 err)
{
  switch (err)
    {
    case ESP_DECRYPT_ERROR_HANDOFF:
      return IPSEC_SA_ERROR_HANDOFF;
    case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
      return IPSEC_SA_ERROR_DECRYPTION_FAILED;
    case ESP_DECRYPT_ERROR_INTEG_ERROR:
      return IPSEC_SA_ERROR_INTEG_ERROR;
    case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
    case ESP_DECRYPT_ERROR_REPLAY:
      return IPSEC_SA_ERROR_REPLAY;
    case ESP_DECRYPT_ERROR_RUNT:
      return IPSEC_SA_ERROR_RUNT;
    case ESP_DECRYPT_ERROR_NO_BUFFERS:
      return IPSEC_SA_ERROR_NO_BUFFERS;
    case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
      return IPSEC_SA_ERROR_OVERSIZED_HEADER;
    case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
      return IPSEC_SA_ERROR_NO_TAIL_SPACE;
    case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
      return IPSEC_SA_ERROR_TUN_NO_PROTO;
    case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
      return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
    }
  return ~0;
}

always_inline void
esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
			    u32 thread_index, u32 err, u16 index, u16 *nexts,
			    u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
			esp_encrypt_err_to_sa_err (err), index, nexts,
			drop_next, sa_index);
}

always_inline void
esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
			    u32 thread_index, u32 err, u16 index, u16 *nexts,
			    u16 drop_next, u32 sa_index)
{
  ipsec_set_next_index (b, node, thread_index, err,
			esp_decrypt_err_to_sa_err (err), index, nexts,
			drop_next, sa_index);
}

/**
 * The post data structure for esp_encrypt/decrypt_inline to write to the
 * vlib_buffer_t opaque unused field, and for post nodes to pick up after
 * dequeue.
 **/
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
  u16 is_chain;
  u32 seq_hi;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
STATIC_ASSERT_OFFSET_OF (esp_decrypt_packet_data_t, seq, sizeof (u64));

/* we are forced to store the decrypt post data in 2 separate places:
   the vlib_buffer opaque and opaque2 fields. */
typedef struct
{
  vlib_buffer_t *lb;
  u32 free_buffer_index;
  u8 icv_removed;
} esp_decrypt_packet_data2_t;

typedef union
{
  u16 next_index;
  esp_decrypt_packet_data_t decrypt_data;
} esp_post_data_t;

STATIC_ASSERT (sizeof (esp_post_data_t) <=
	       STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
	       "Custom meta-data too large for vnet_buffer_opaque_t");

#define esp_post_data(b) \
    ((esp_post_data_t *)((u8 *)((b)->opaque) \
	+ STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))

STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
	       STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
	       "Custom meta-data too large for vnet_buffer_opaque2_t");

#define esp_post_data2(b) \
    ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
	+ STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))

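/* Illustrative sketch (hypothetical helper): how a post-dequeue node can
 * recover the metadata stashed through the macros above once the async
 * crypto result comes back; chain bookkeeping would be read from
 * esp_post_data2 (b) in the same way. */
always_inline u32
esp_decrypt_post_sa_index_example (vlib_buffer_t *b)
{
  return esp_post_data (b)->decrypt_data.sa_index;
}
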
typedef struct
{
  /* esp post node index for async crypto */
  u32 esp4_post_next;
  u32 esp6_post_next;
  u32 esp4_tun_post_next;
  u32 esp6_tun_post_next;
  u32 esp_mpls_tun_post_next;
} esp_async_post_next_t;

extern esp_async_post_next_t esp_encrypt_async_next;
extern esp_async_post_next_t esp_decrypt_async_next;

/* if submitting a frame fails, drop all buffers in the frame */
always_inline u32
esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
				 vlib_node_runtime_t *node, u32 err,
				 u32 ipsec_sa_err, u16 index, u32 *from,
				 u16 *nexts, u16 drop_next_index,
				 bool is_encrypt)
{
  vlib_buffer_t *b;
  u32 n_drop = f->n_elts;
  u32 *bi = f->buffer_indices;

  while (n_drop--)
    {
      u32 sa_index;

      from[index] = bi[0];
      b = vlib_get_buffer (vm, bi[0]);

      if (is_encrypt)
	{
	  sa_index = vnet_buffer (b)->ipsec.sad_index;
	}
      else
	{
	  sa_index = esp_post_data (b)->decrypt_data.sa_index;
	}

      ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
			    index, nexts, drop_next_index, sa_index);
      bi++;
      index++;
    }

  return (f->n_elts);
}

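/* Illustrative usage sketch (hypothetical node code): if handing the open
 * frame to the async crypto engine fails, recycle every buffer in it to the
 * drop next index and credit the error to the right SA. */
always_inline u32
esp_encrypt_submit_example (vlib_main_t *vm, vlib_node_runtime_t *node,
			    vnet_crypto_async_frame_t *f, u32 *from,
			    u16 *nexts, u16 drop_next)
{
  if (PREDICT_FALSE (vnet_crypto_async_submit_open_frame (vm, f) < 0))
    return esp_async_recycle_failed_submit (
      vm, f, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
      IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, 0 /* index */, from, nexts,
      drop_next, true /* is_encrypt */);
  return 0;
}
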
#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */