Line data Source code
1 : /*
2 : * Copyright (c) 2020 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #include <vppinfra/clib.h>
17 : #include <vppinfra/mem.h>
18 : #include <vppinfra/time.h>
19 : #include <vppinfra/format.h>
20 : #include <vppinfra/clib_error.h>
21 :
22 : /* while usage of dlmalloc APIs is generally discouraged, in this particular
23 : * case there is significant benefit of calling them directly due to
24 : * smaller memory consumption (no wwp and headroom space) */
25 : #include <vppinfra/dlmalloc.h>
26 :
27 : #define CLIB_MEM_BULK_DEFAULT_MIN_ELTS_PER_CHUNK 32
28 :
/* Per-chunk header stored at the start of every chunk allocation.
 * Chunks live on one of two doubly-linked lists owned by the bulk
 * allocator: full_chunks (no free elts) or avail_chunks. */
typedef struct clib_mem_bulk_chunk_hdr
{
  u32 freelist; /* index of first free elt in this chunk, ~0 when empty list */
  u32 n_free;	/* number of currently free elts in this chunk */
  struct clib_mem_bulk_chunk_hdr *prev, *next; /* chunk list linkage */
} clib_mem_bulk_chunk_hdr_t;
35 :
/* Bulk allocator state; returned to callers as an opaque
 * clib_mem_bulk_handle_t. */
typedef struct
{
  u32 elt_sz;	     /* element size, rounded up to 'align' */
  u32 chunk_hdr_sz;  /* sizeof (clib_mem_bulk_chunk_hdr_t) rounded to 'align' */
  u32 elts_per_chunk;
  u32 align;	     /* element alignment, minimum 16 */
  u32 chunk_align;   /* power-of-2 chunk alignment; lets free() recover the
		      * chunk header from an element pointer by masking */
  void *mspace;	     /* dlmalloc mspace all chunks are carved from */
  clib_mem_bulk_chunk_hdr_t *full_chunks, *avail_chunks;
} clib_mem_bulk_t;
46 :
47 : static inline uword
48 916 : bulk_chunk_size (clib_mem_bulk_t *b)
49 : {
50 916 : return (uword) b->elts_per_chunk * b->elt_sz + b->chunk_hdr_sz;
51 : }
52 :
53 : __clib_export clib_mem_bulk_handle_t
54 560 : clib_mem_bulk_init (u32 elt_sz, u32 align, u32 min_elts_per_chunk)
55 : {
56 560 : clib_mem_heap_t *heap = clib_mem_get_heap ();
57 : clib_mem_bulk_t *b;
58 : uword sz;
59 :
60 560 : if ((b = mspace_memalign (heap->mspace, 16, sizeof (clib_mem_bulk_t))) == 0)
61 0 : return 0;
62 :
63 560 : if (align < 16)
64 0 : align = 16;
65 :
66 560 : if (min_elts_per_chunk == 0)
67 0 : min_elts_per_chunk = CLIB_MEM_BULK_DEFAULT_MIN_ELTS_PER_CHUNK;
68 :
69 560 : clib_mem_unpoison (b, sizeof (clib_mem_bulk_t));
70 560 : clib_memset (b, 0, sizeof (clib_mem_bulk_t));
71 560 : b->mspace = heap->mspace;
72 560 : b->align = align;
73 560 : b->elt_sz = round_pow2 (elt_sz, align);
74 560 : b->chunk_hdr_sz = round_pow2 (sizeof (clib_mem_bulk_chunk_hdr_t), align);
75 560 : b->elts_per_chunk = min_elts_per_chunk;
76 560 : sz = bulk_chunk_size (b);
77 560 : b->chunk_align = max_pow2 (sz);
78 560 : b->elts_per_chunk += (b->chunk_align - sz) / b->elt_sz;
79 560 : return b;
80 : }
81 :
82 : __clib_export void
83 145 : clib_mem_bulk_destroy (clib_mem_bulk_handle_t h)
84 : {
85 145 : clib_mem_bulk_t *b = h;
86 : clib_mem_bulk_chunk_hdr_t *c, *next;
87 145 : void *ms = b->mspace;
88 :
89 145 : c = b->full_chunks;
90 :
91 145 : again:
92 145 : while (c)
93 : {
94 0 : next = c->next;
95 0 : clib_mem_poison (c, bulk_chunk_size (b));
96 0 : mspace_free (ms, c);
97 0 : c = next;
98 : }
99 :
100 145 : if (b->avail_chunks)
101 : {
102 0 : c = b->avail_chunks;
103 0 : b->avail_chunks = 0;
104 0 : goto again;
105 : }
106 :
107 145 : clib_mem_poison (b, sizeof (clib_mem_bulk_t));
108 145 : mspace_free (ms, b);
109 145 : }
110 :
111 : static inline void *
112 17415 : get_chunk_elt_ptr (clib_mem_bulk_t *b, clib_mem_bulk_chunk_hdr_t *c, u32 index)
113 : {
114 17415 : return (u8 *) c + b->chunk_hdr_sz + index * b->elt_sz;
115 : }
116 :
117 : static inline void
118 0 : add_to_chunk_list (clib_mem_bulk_chunk_hdr_t **first,
119 : clib_mem_bulk_chunk_hdr_t *c)
120 : {
121 0 : c->next = *first;
122 0 : c->prev = 0;
123 0 : if (c->next)
124 0 : c->next->prev = c;
125 0 : *first = c;
126 0 : }
127 :
128 : static inline void
129 130 : remove_from_chunk_list (clib_mem_bulk_chunk_hdr_t **first,
130 : clib_mem_bulk_chunk_hdr_t *c)
131 : {
132 130 : if (c->next)
133 0 : c->next->prev = c->prev;
134 130 : if (c->prev)
135 0 : c->prev->next = c->next;
136 : else
137 130 : *first = c->next;
138 130 : }
139 :
140 : __clib_export void *
141 1207 : clib_mem_bulk_alloc (clib_mem_bulk_handle_t h)
142 : {
143 1207 : clib_mem_bulk_t *b = h;
144 1207 : clib_mem_bulk_chunk_hdr_t *c = b->avail_chunks;
145 : u32 elt_idx;
146 :
147 1207 : if (b->avail_chunks == 0)
148 : {
149 226 : u32 i, sz = bulk_chunk_size (b);
150 226 : c = mspace_memalign (b->mspace, b->chunk_align, sz);
151 226 : clib_mem_unpoison (c, sz);
152 226 : clib_memset (c, 0, sizeof (clib_mem_bulk_chunk_hdr_t));
153 226 : b->avail_chunks = c;
154 226 : c->n_free = b->elts_per_chunk;
155 :
156 : /* populate freelist */
157 14238 : for (i = 0; i < b->elts_per_chunk - 1; i++)
158 14012 : *((u32 *) get_chunk_elt_ptr (b, c, i)) = i + 1;
159 226 : *((u32 *) get_chunk_elt_ptr (b, c, i)) = ~0;
160 : }
161 :
162 1207 : ASSERT (c->freelist != ~0);
163 1207 : elt_idx = c->freelist;
164 1207 : c->freelist = *((u32 *) get_chunk_elt_ptr (b, c, elt_idx));
165 1207 : c->n_free--;
166 :
167 1207 : if (c->n_free == 0)
168 : {
169 : /* chunk is full */
170 0 : ASSERT (c->freelist == ~0);
171 0 : remove_from_chunk_list (&b->avail_chunks, c);
172 0 : add_to_chunk_list (&b->full_chunks, c);
173 : }
174 :
175 1207 : return get_chunk_elt_ptr (b, c, elt_idx);
176 : }
177 :
178 : __clib_export void
179 763 : clib_mem_bulk_free (clib_mem_bulk_handle_t h, void *p)
180 : {
181 763 : clib_mem_bulk_t *b = h;
182 763 : uword offset = (uword) p & (b->chunk_align - 1);
183 763 : clib_mem_bulk_chunk_hdr_t *c = (void *) ((u8 *) p - offset);
184 763 : u32 elt_idx = (offset - b->chunk_hdr_sz) / b->elt_sz;
185 :
186 763 : ASSERT (elt_idx < b->elts_per_chunk);
187 763 : ASSERT (get_chunk_elt_ptr (b, c, elt_idx) == p);
188 :
189 763 : c->n_free++;
190 :
191 763 : if (c->n_free == b->elts_per_chunk)
192 : {
193 : /* chunk is empty - give it back */
194 130 : remove_from_chunk_list (&b->avail_chunks, c);
195 130 : clib_mem_poison (c, bulk_chunk_size (b));
196 130 : mspace_free (b->mspace, c);
197 130 : return;
198 : }
199 :
200 633 : if (c->n_free == 1)
201 : {
202 : /* move chunk to avail chunks */
203 0 : remove_from_chunk_list (&b->full_chunks, c);
204 0 : add_to_chunk_list (&b->avail_chunks, c);
205 : }
206 :
207 : /* add elt to freelist */
208 633 : *(u32 *) p = c->freelist;
209 633 : c->freelist = elt_idx;
210 : }
211 :
212 : __clib_export u8 *
213 0 : format_clib_mem_bulk (u8 *s, va_list *args)
214 : {
215 0 : clib_mem_bulk_t *b = va_arg (*args, clib_mem_bulk_handle_t);
216 : clib_mem_bulk_chunk_hdr_t *c;
217 0 : uword n_chunks = 0, n_free_elts = 0, n_elts, chunk_sz;
218 :
219 0 : c = b->full_chunks;
220 0 : while (c)
221 : {
222 0 : n_chunks++;
223 0 : c = c->next;
224 : }
225 :
226 0 : c = b->avail_chunks;
227 0 : while (c)
228 : {
229 0 : n_chunks++;
230 0 : n_free_elts += c->n_free;
231 0 : c = c->next;
232 : }
233 :
234 0 : n_elts = n_chunks * b->elts_per_chunk;
235 0 : chunk_sz = b->chunk_hdr_sz + (uword) b->elts_per_chunk * b->elt_sz;
236 :
237 0 : s = format (s, "%u bytes/elt, align %u, chunk-align %u, ", b->elt_sz,
238 : b->align, b->chunk_align);
239 0 : s = format (s, "%u elts-per-chunk, chunk size %lu bytes", b->elts_per_chunk,
240 : chunk_sz);
241 :
242 0 : if (n_chunks == 0)
243 0 : return format (s, "\nempty");
244 :
245 0 : s = format (s, "\n%lu chunks allocated, ", n_chunks);
246 0 : s = format (s, "%lu / %lu free elts (%.1f%%), ", n_free_elts, n_elts,
247 0 : (f64) n_free_elts * 100 / n_elts);
248 0 : s = format (s, "%lu bytes of memory consumed", n_chunks * chunk_sz);
249 :
250 0 : return s;
251 : }
|