DPDK 16.11
rte_mempool.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright(c) 2016 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed allocation. */
} __rte_cache_aligned;
#endif

/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	uint32_t size;        /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;         /**< Current cache count */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;   /**< Total size of an object (header + elt + trailer). */
};

/** Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)

#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header: prefixed to each object stored in a mempool.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	phys_addr_t physaddr;            /**< Physical address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type.
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer (debug builds only).
 */
struct rte_mempool_objtlr {
	uint64_t cookie;                 /**< Debug cookie. */
};

#endif

/**
 * A list of memory chunk headers type.
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk.
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool memory chunk header: describes a range of memory attached
 * to a mempool.
 */
struct rte_mempool_memhdr {
	STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk */
	void *addr;              /**< Virtual address of the chunk */
	phys_addr_t phys_addr;   /**< Physical address of the chunk */
	size_t len;              /**< length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;            /**< Argument passed to the free callback */
};

/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements, it could be changed to
	 * RTE_MEMPOOL_NAMESIZE next time the ABI changes
	 */
	char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
	RTE_STD_C11
	union {
		void *pool_data;         /**< Ring or pool to store objects. */
		uint64_t pool_id;        /**< External mempool identifier. */
	};
	void *pool_config;               /**< optional args for ops alloc. */
	const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
	int flags;                       /**< Flags of the mempool. */
	int socket_id;                   /**< Socket id passed at create. */
	uint32_t size;                   /**< Max size of the mempool. */
	uint32_t cache_size;             /**< Size of per-lcore default local cache. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
	uint32_t trailer_size;           /**< Size of trailer (after elt). */

	unsigned private_data_size;      /**< Size of private data. */
	/**
	 * Index into rte_mempool_ops_table array of mempool ops
	 * structs, which contain callback function pointers.
	 * An index is used here rather than pointers to the callbacks
	 * to facilitate any secondary processes that may want to use
	 * this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;         /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;          /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;

#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines. */
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer". */
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer". */
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
		unsigned __lcore_id = rte_lcore_id();           \
		if (__lcore_id < RTE_MAX_LCORE) {               \
			mp->stats[__lcore_id].name##_objs += n; \
			mp->stats[__lcore_id].name##_bulk += 1; \
		}                                               \
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif

#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

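/*
 * Layout note (editor's addition): every object handed out by the pool is
 * laid out in memory as
 *
 *	| struct rte_mempool_objhdr | object data (elt_size) | objtlr |
 *
 * which is why the helpers above reach the header with RTE_PTR_SUB() and
 * the trailer with RTE_PTR_ADD(obj, mp->elt_size). The cookie trailer is
 * only present when RTE_LIBRTE_MEMPOOL_DEBUG is enabled.
 */
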
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/** Function prototype to allocate the external pool. */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/** Free the opaque private data stored in mp->pool_data. */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/** Enqueue objects into the external pool. */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/** Dequeue objects from the external pool. */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/** Return the number of available objects in the external pool. */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/** Structure defining mempool operations. */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs. */

/** Structure storing the table of registered ops structs. */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;

/**
 * @internal Get the mempool ops struct from its index.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}

/**
 * @internal Wrapper for the mempool_ops alloc callback.
 */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);

/**
 * @internal Wrapper for the mempool_ops dequeue callback.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->dequeue(mp, obj_table, n);
}

/**
 * @internal Wrapper for the mempool_ops enqueue callback.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}

/**
 * @internal Wrapper for the mempool_ops get_count callback.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal Wrapper for the mempool_ops free callback.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);

/**
 * Set the ops of a mempool.
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);

/**
 * Register mempool operations.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 */
#define MEMPOOL_REGISTER_OPS(ops)					\
	void mp_hdlr_init_##ops(void);					\
	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
	{								\
		rte_mempool_register_ops(&ops);				\
	}

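/*
 * Usage sketch (illustrative, not part of the original header): a minimal
 * custom ops back-end wrapping an rte_ring, modelled on the built-in
 * "ring_mp_mc" handler. The names my_ring_ops, my_alloc, etc. are
 * hypothetical, and <rte_errno.h> is assumed to be included.
 *
 *	static int
 *	my_alloc(struct rte_mempool *mp)
 *	{
 *		struct rte_ring *r = rte_ring_create(mp->name,
 *				rte_align32pow2(mp->size + 1),
 *				mp->socket_id, 0);
 *		if (r == NULL)
 *			return -rte_errno;
 *		mp->pool_data = r;
 *		return 0;
 *	}
 *
 *	static void
 *	my_free(struct rte_mempool *mp)
 *	{
 *		rte_ring_free(mp->pool_data);
 *	}
 *
 *	static int
 *	my_enqueue(struct rte_mempool *mp, void * const *obj_table,
 *			unsigned int n)
 *	{
 *		return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
 *	}
 *
 *	static int
 *	my_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
 *	{
 *		return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
 *	}
 *
 *	static unsigned
 *	my_get_count(const struct rte_mempool *mp)
 *	{
 *		return rte_ring_count(mp->pool_data);
 *	}
 *
 *	static const struct rte_mempool_ops my_ring_ops = {
 *		.name = "my_ring",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	MEMPOOL_REGISTER_OPS(my_ring_ops);
 */
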
/**
 * An object callback function for mempool.
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/**
 * A mempool constructor callback function.
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/**
 * Create a new mempool named *name* in memory.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);

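/*
 * Usage sketch (illustrative): creating a pool of 8191 buffers of 2048
 * bytes with a 256-object per-lcore cache; the name and sizes here are
 * arbitrary. A count of one less than a power of two keeps the default
 * ring-based handler's memory usage optimal.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *			NULL, NULL,	// no pool constructor
 *			NULL, NULL,	// no per-object constructor
 *			rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mempool\n");
 */
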
/**
 * Create a new mempool named *name* in memory, using externally
 * allocated memory.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, void *vaddr,
		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);

/**
 * Create an empty mempool (no memory is populated).
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
	unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);

/**
 * Free a mempool.
 */
void
rte_mempool_free(struct rte_mempool *mp);

/**
 * Add physically contiguous memory for objects in the pool at init.
 */
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
	phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add physical memory (described as a table of pages) for objects in
 * the pool at init.
 */
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add memory for objects in the pool at init, using the default allocator.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Add memory from an anonymous mapping for objects in the pool at init.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);

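/*
 * Usage sketch (illustrative): the two-step alternative to
 * rte_mempool_create(): create an empty pool, select an ops back-end by
 * name ("ring_mp_mc" is the default built-in handler), then populate it
 * from the default memory allocator.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 8191, 2048, 256, 0,
 *			rte_socket_id(), 0);
 *	if (mp == NULL)
 *		return -1;
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0 ||
 *	    rte_mempool_populate_default(mp) < 0) {
 *		rte_mempool_free(mp);
 *		return -1;
 *	}
 */
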
/**
 * Call a function for each mempool element.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Call a function for each mempool memory chunk.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);

/**
 * Dump the status of the mempool to a file.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/**
 * Create a user-owned mempool cache.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);

/**
 * Flush a user-owned mempool cache to the specified mempool.
 */
static inline void __attribute__((always_inline))
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}

/**
 * Get a pointer to the per-lcore default mempool cache.
 */
static inline struct rte_mempool_cache *__attribute__((always_inline))
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	return &mp->local_cache[lcore_id];
}

/**
 * @internal Put several objects back in the mempool; used internally.
 */
static inline void __attribute__((always_inline))
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
		      unsigned n, struct rte_mempool_cache *cache, int flags)
{
	void **cache_objs;

	/* increment stat now, since adding to the mempool always succeeds */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* No cache provided or single producer */
	if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT))
		goto ring_enqueue;

	/* Go straight to ring if put would overflow mem allocated for cache */
	if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows this algorithm:
	 *   1. Add the objects to the cache.
	 *   2. Anything greater than the cache min value (once it crosses
	 *      the cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

	cache->len += n;

	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}

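/*
 * Worked example (editor's addition): with cache->size == 256, the flush
 * threshold is set somewhat above the size (internally a multiplier of
 * roughly 1.5 is applied in rte_mempool.c, giving flushthresh == 384,
 * but that constant is not part of this API). If cache->len is 380 and 8
 * objects are put, len becomes 388 >= 384, so 388 - 256 = 132 objects
 * are flushed to the ring and len drops back to 256.
 */
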
static inline void __attribute__((always_inline))
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned n, struct rte_mempool_cache *cache, int flags)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_generic_put(mp, obj_table, n, cache, flags);
}

__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_generic_put(mp, obj_table, n, cache, 0);
}

__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
			unsigned n)
{
	rte_mempool_generic_put(mp, obj_table, n, NULL, MEMPOOL_F_SP_PUT);
}

static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_generic_put(mp, obj_table, n, cache, mp->flags);
}

__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_generic_put(mp, &obj, 1, cache, 0);
}

__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_generic_put(mp, &obj, 1, NULL, MEMPOOL_F_SP_PUT);
}

static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}

/**
 * @internal Get several objects from the mempool; used internally.
 */
static inline int __attribute__((always_inline))
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
		      unsigned n, struct rte_mempool_cache *cache, int flags)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided or single consumer */
	if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET ||
		     n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, then fill from it. */
		uint32_t req = n + (cache->size - cache->len);

		/* How many do we require, i.e. number to fill the cache + the request */
		ret = rte_mempool_ops_dequeue_bulk(mp,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * On the off chance that we are buffer constrained
			 * and cannot allocate cache + n objects, go to the
			 * ring directly. If that fails, we are truly out of
			 * buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}

static inline int __attribute__((always_inline))
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
			struct rte_mempool_cache *cache, int flags)
{
	int ret;
	ret = __mempool_generic_get(mp, obj_table, n, cache, flags);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

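/*
 * Usage sketch (illustrative): a user-owned cache for a thread that is
 * not a regular polling lcore. The same cache must be used for both get
 * and put so that objects are not leaked into another cache.
 *
 *	struct rte_mempool_cache *c;
 *	void *objs[32];
 *
 *	c = rte_mempool_cache_create(128, SOCKET_ID_ANY);
 *	if (c == NULL)
 *		return -1;
 *	if (rte_mempool_generic_get(mp, objs, 32, c, 0) == 0) {
 *		// ... use the 32 objects ...
 *		rte_mempool_generic_put(mp, objs, 32, c, 0);
 *	}
 *	rte_mempool_cache_flush(c, mp);
 *	rte_mempool_cache_free(c);
 */
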
__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	return rte_mempool_generic_get(mp, obj_table, n, cache, 0);
}

__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_mempool_generic_get(mp, obj_table, n, NULL,
				       MEMPOOL_F_SC_GET);
}

static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	return rte_mempool_generic_get(mp, obj_table, n, cache, mp->flags);
}

__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	return rte_mempool_generic_get(mp, obj_p, 1, cache, 0);
}

__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_generic_get(mp, obj_p, 1, NULL, MEMPOOL_F_SC_GET);
}

static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}

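/*
 * Usage sketch (illustrative): a single-object allocate/free round trip.
 * rte_mempool_get() returns 0 on success and a negative value when the
 * pool is exhausted, so the return value must always be checked.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) < 0)
 *		return -1;	// pool empty
 *	// ... use up to mp->elt_size bytes at obj ...
 *	rte_mempool_put(mp, obj);
 */
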
/**
 * Return the number of entries in the mempool.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/**
 * @deprecated Use rte_mempool_avail_count() instead.
 */
__rte_deprecated
unsigned rte_mempool_count(const struct rte_mempool *mp);

/**
 * Return the number of elements which have been allocated from the mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);

/**
 * @deprecated Use rte_mempool_in_use_count() instead.
 */
__rte_deprecated
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
	return rte_mempool_in_use_count(mp);
}

/**
 * Test if the mempool is full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_avail_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_avail_count(mp) == 0);
}

/**
 * Return the physical address of elt, which is an element of the pool mp.
 */
static inline phys_addr_t
rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->physaddr;
}

/**
 * Check the consistency of mempool objects.
 */
void rte_mempool_audit(struct rte_mempool *mp);

/**
 * Return a pointer to the private data in a mempool structure.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

/**
 * Dump the status of all mempools to a file.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

/**
 * Given a desired size of the mempool element and mempool flags,
 * calculate the header, trailer and total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/**
 * Calculate the maximum amount of memory required to store the given
 * number of objects.
 */
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift);

/**
 * Calculate how much memory would actually be required with the given
 * memory footprint to store the required number of objects.
 */
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
	uint32_t pg_shift);

/**
 * Walk the list of all memory pools.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */