/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>

#include <vmxnet3/vmxnet3.h>

static_always_inline void
vmxnet3_tx_comp_ring_advance_next (vmxnet3_txq_t * txq)
{
  vmxnet3_tx_comp_ring *comp_ring = &txq->tx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == txq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_TXCF_GEN;
    }
}

static_always_inline void
vmxnet3_tx_ring_advance_produce (vmxnet3_txq_t * txq)
{
  txq->tx_ring.produce++;
  if (PREDICT_FALSE (txq->tx_ring.produce == txq->size))
    {
      txq->tx_ring.produce = 0;
      txq->tx_ring.gen ^= VMXNET3_TXF_GEN;
    }
}

static_always_inline void
vmxnet3_tx_ring_advance_consume (vmxnet3_txq_t * txq)
{
  txq->tx_ring.consume++;
  txq->tx_ring.consume &= txq->size - 1;
}

static_always_inline void
vmxnet3_txq_release (vlib_main_t * vm, vmxnet3_device_t * vd,
		     vmxnet3_txq_t * txq)
{
  vmxnet3_tx_comp *tx_comp;
  vmxnet3_tx_comp_ring *comp_ring;

  comp_ring = &txq->tx_comp_ring;
  tx_comp = &txq->tx_comp[comp_ring->next];

  while ((tx_comp->flags & VMXNET3_TXCF_GEN) == comp_ring->gen)
    {
      u16 eop_idx = tx_comp->index & VMXNET3_TXC_INDEX;
      u32 bi0 = txq->tx_ring.bufs[txq->tx_ring.consume];

      vlib_buffer_free_one (vm, bi0);
      while (txq->tx_ring.consume != eop_idx)
	{
	  vmxnet3_tx_ring_advance_consume (txq);
	}
      vmxnet3_tx_ring_advance_consume (txq);

      vmxnet3_tx_comp_ring_advance_next (txq);
      tx_comp = &txq->tx_comp[comp_ring->next];
    }
}
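/*
 * A minimal sketch (not part of the driver) of the generation-bit
 * convention the harvest loop above relies on: a completion-ring entry
 * is valid only while its generation bit matches the generation the
 * consumer currently expects, and the expected value flips on every
 * wrap (see vmxnet3_tx_comp_ring_advance_next). The helper name below
 * is hypothetical.
 */
static_always_inline int
demo_tx_comp_entry_valid (vmxnet3_tx_comp * tx_comp,
			  vmxnet3_tx_comp_ring * comp_ring)
{
  /* Stale entries left over from the previous pass of the ring still
     carry the old generation bit, so they compare unequal and the
     harvest loop in vmxnet3_txq_release stops at the first one. */
  return (tx_comp->flags & VMXNET3_TXCF_GEN) == comp_ring->gen;
}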
static_always_inline u16
vmxnet3_tx_ring_space_left (vmxnet3_txq_t * txq)
{
  u16 count;

  count = (txq->tx_ring.consume - txq->tx_ring.produce - 1);
  /* Wrapped? */
  if (txq->tx_ring.produce >= txq->tx_ring.consume)
    count += txq->size;
  return count;
}
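/*
 * An illustrative sketch (not part of the driver) of the same free-slot
 * computation on plain integers, with a worked case. Assuming a ring of
 * size 8 with consume = 2 and produce = 6, (2 - 6 - 1) wraps in u16
 * arithmetic and the size correction yields 3 free slots; one slot
 * always stays unused so a full ring is distinguishable from an empty
 * one. With produce = 2 and consume = 6, 6 - 2 - 1 = 3 directly. The
 * function name is hypothetical.
 */
static_always_inline u16
demo_ring_space_left (u16 produce, u16 consume, u16 size)
{
  u16 count = (u16) (consume - produce - 1);

  if (produce >= consume)	/* wrapped */
    count += size;
  return count;
}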
VNET_DEVICE_CLASS_TX_FN (vmxnet3_device_class) (vlib_main_t * vm,
						vlib_node_runtime_t * node,
						vlib_frame_t * frame)
{
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, rd->dev_instance);
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 bi0;
  vlib_buffer_t *b0;
  vmxnet3_tx_desc *txd = 0;
  u32 desc_idx, generation, first_idx;
  u16 space_left;
  u16 n_left = frame->n_vectors;
  vmxnet3_txq_t *txq;
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  u16 qid = tf->queue_id, produce;

  if (PREDICT_FALSE (!(vd->flags & VMXNET3_DEVICE_F_LINK_UP)))
    {
      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index, VMXNET3_TX_ERROR_LINK_DOWN,
			n_left);
      return (0);
    }

  txq = vec_elt_at_index (vd->txqs, qid);
  if (tf->shared_queue)
    clib_spinlock_lock (&txq->lock);

  vmxnet3_txq_release (vm, vd, txq);

  produce = txq->tx_ring.produce;
  while (PREDICT_TRUE (n_left))
    {
      u16 space_needed = 1, i;
      u32 gso_size = 0;
      u32 l4_hdr_sz;
      vlib_buffer_t *b;
      u32 hdr_len = 0;

      bi0 = buffers[0];
      b0 = vlib_get_buffer (vm, bi0);
      b = b0;

      space_left = vmxnet3_tx_ring_space_left (txq);
      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  u32 next_buffer = b->next_buffer;

	  b = vlib_get_buffer (vm, next_buffer);
	  space_needed++;
	}
      if (PREDICT_FALSE (space_left < space_needed))
	{
	  vmxnet3_txq_release (vm, vd, txq);
	  space_left = vmxnet3_tx_ring_space_left (txq);

	  if (PREDICT_FALSE (space_left < space_needed))
	    {
	      vlib_buffer_free_one (vm, bi0);
	      vlib_error_count (vm, node->node_index,
				VMXNET3_TX_ERROR_NO_FREE_SLOTS, 1);
	      buffers++;
	      n_left--;
	      /*
	       * Drop this packet, but we may still have enough room for
	       * the next one
	       */
	      continue;
	    }
	}

      /*
       * Toggle the generation bit for the SOP fragment so the device
       * does not start reading an incomplete packet
       */
      generation = txq->tx_ring.gen ^ VMXNET3_TXF_GEN;
      first_idx = txq->tx_ring.produce;
      for (i = 0; i < space_needed; i++)
	{
	  b0 = vlib_get_buffer (vm, bi0);

	  desc_idx = txq->tx_ring.produce;

	  vmxnet3_tx_ring_advance_produce (txq);
	  txq->tx_ring.bufs[desc_idx] = bi0;

	  txd = &txq->tx_desc[desc_idx];

	  txd->address = vlib_buffer_get_current_pa (vm, b0);

	  txd->flags[0] = generation | b0->current_length;
	  txd->flags[1] = 0;
	  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
	    {
	      /*
	       * We should not be getting GSO outbound traffic unless
	       * LRO is enabled
	       */
	      ASSERT (vd->gso_enable == 1);
	      gso_size = vnet_buffer2 (b0)->gso_size;
	      l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
	      if (b0->flags & VNET_BUFFER_F_IS_IP6)
		hdr_len = sizeof (ethernet_header_t) +
		  sizeof (ip6_header_t) + l4_hdr_sz;
	      else
		hdr_len = sizeof (ethernet_header_t) +
		  sizeof (ip4_header_t) + l4_hdr_sz;
	    }

	  generation = txq->tx_ring.gen;
	  bi0 = b0->next_buffer;
	}
      if (PREDICT_FALSE (gso_size != 0))
	{
	  txq->tx_desc[first_idx].flags[1] = hdr_len;
	  txq->tx_desc[first_idx].flags[1] |= VMXNET3_TXF_OM (VMXNET3_OM_TSO);
	  txq->tx_desc[first_idx].flags[0] |= VMXNET3_TXF_MSSCOF (gso_size);
	}
      txd->flags[1] |= VMXNET3_TXF_CQ | VMXNET3_TXF_EOP;
      asm volatile ("":::"memory");
      /*
       * Now toggle back the generation bit for the first segment;
       * the device can start reading the packet
       */
      txq->tx_desc[first_idx].flags[0] ^= VMXNET3_TXF_GEN;

      buffers++;
      n_left--;
    }

  if (PREDICT_TRUE (produce != txq->tx_ring.produce))
    vmxnet3_reg_write_inline (vd, 0, txq->reg_txprod, txq->tx_ring.produce);

  if (tf->shared_queue)
    clib_spinlock_unlock (&txq->lock);

  return (frame->n_vectors - n_left);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
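/*
 * A minimal sketch (not part of the driver) of the two-phase publish
 * pattern used in the TX function above: every descriptor of a packet
 * is written with the generation bit inverted, a compiler barrier keeps
 * the descriptor stores from being reordered past the flip, and only
 * then is the SOP descriptor's generation bit toggled so the device
 * sees the whole chain become valid in one step. The function name is
 * hypothetical.
 */
static_always_inline void
demo_publish_packet (vmxnet3_txq_t * txq, u32 first_idx)
{
  /* Ensure all descriptor bodies are visible before the SOP generation
     bit flips; without the barrier the device could observe a valid
     SOP while later fragments are still being written. */
  asm volatile ("" ::: "memory");
  txq->tx_desc[first_idx].flags[0] ^= VMXNET3_TXF_GEN;
}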