A Look at the Lock-Free Queue rte_ring (repost)
I used this lock-free queue a while back when implementing fast small-packet forwarding on top of DPDK, so let's take a look at it today. (The fast-forwarding feature was later de-DPDK'd and implemented directly in the kernel.)
DPDK's lock-free ring borrows from the Linux kernel's kfifo lock-free queue. The ring is essentially a FIFO circular queue with these properties:
- First in, first out (FIFO)
- Fixed maximum size; the object pointers are stored in a table
- Lock-free implementation
- Multi-consumer or single-consumer dequeue
- Multi-producer or single-producer enqueue
- Bulk dequeue: dequeues exactly the requested number of objects on success, otherwise does nothing
- Bulk enqueue: enqueues exactly the requested number of objects on success, otherwise does nothing
- Burst dequeue: if the requested count cannot be met, dequeues as many objects as are available
- Burst enqueue: if the requested count cannot be met, enqueues as many objects as the free space allows (the sketch right after this list contrasts bulk and burst)
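A minimal sketch of the bulk-vs-burst difference, assuming a DPDK 17.05+ environment where `rte_ring_enqueue_bulk()`/`rte_ring_enqueue_burst()` take a `free_space` out-parameter; the ring name and dummy objects are made up for illustration:

```c
#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ring.h>

int main(int argc, char **argv)
{
    if (rte_eal_init(argc, argv) < 0)
        return -1;

    /* count must be a power of two; with the default flags the usable
     * capacity is count - 1, i.e. 7 slots here */
    struct rte_ring *r = rte_ring_create("bulk_vs_burst", 8, rte_socket_id(),
                                         RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (r == NULL)
        return -1;

    void *objs[6] = { NULL, NULL, NULL, NULL, NULL, NULL };
    unsigned int free_space;

    /* Bulk is all-or-nothing: the first call enqueues all 6 objects... */
    unsigned int first  = rte_ring_enqueue_bulk(r, objs, 6, &free_space);
    /* ...the second returns 0, because only 1 of the 7 slots remains */
    unsigned int second = rte_ring_enqueue_bulk(r, objs, 6, &free_space);
    /* Burst is best-effort: the same request now enqueues just 1 object */
    unsigned int burst  = rte_ring_enqueue_burst(r, objs, 6, &free_space);

    printf("bulk=%u bulk_again=%u burst=%u\n", first, second, burst);
    return 0;
}
```

With a ring of size 8 (7 usable slots), this should print `bulk=6 bulk_again=0 burst=1`.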
Compared with a linked-list queue, this data structure has the following advantages:
- Faster: an enqueue or dequeue needs only a single compare-and-swap instruction on a sizeof(void *)-sized value, rather than several double-word compare-and-swap instructions (see the CAS sketch after these lists)
- Simpler than a fully lock-free queue
- Well suited to bulk enqueue/dequeue operations: because the pointers are stored in a table, dequeuing several objects does not incur as many cache misses as walking a linked list would. Moreover, a bulk dequeue of many objects costs no more than a dequeue of a single object.
Disadvantages:
- The size is fixed
- Many rings cost more memory than linked lists: even an empty ring occupies at least the space of n pointers.
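The single CAS mentioned above is the only atomic read-modify-write on the hot path. As a rough illustration of what `rte_atomic32_cmpset()` provides (the real implementation uses inline assembly on x86; this stand-in leans on a GCC builtin and is only a sketch):

```c
#include <stdint.h>

/* Illustrative stand-in for rte_atomic32_cmpset(): atomically replace *dst
 * with src if *dst still equals exp; returns non-zero on success. */
static inline int
cmpset32_sketch(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
    return __sync_bool_compare_and_swap(dst, exp, src);
}
```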
```c
/* structure to hold a pair of head/tail values and other metadata */
struct rte_ring_headtail {
    /* Producer head/tail pointers: after production completes, both point to
     * the queue tail.
     * Consumer head/tail pointers: after consumption completes, both point to
     * the queue head. */
    volatile uint32_t head;  /**< Prod/consumer head: how far pre-production/pre-dequeue has reserved */
    volatile uint32_t tail;  /**< Prod/consumer tail: how far actual production/actual dequeue has completed */
    uint32_t single;         /**< True if single prod/cons */
};

struct rte_ring {
    /*
     * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
     * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
     * next time the ABI changes
     */
    char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /**< Name of the ring. */
    int flags;                         /**< Flags supplied at creation. */
    const struct rte_memzone *memzone; /**< Memzone, if any, containing the rte_ring */
    uint32_t size;                     /**< Size of ring. */
    uint32_t mask;                     /**< Mask (size-1) of ring. */
    uint32_t capacity;                 /**< Usable size of ring */

    char pad0 __rte_cache_aligned;     /**< empty cache line */

    /** Ring producer status. */
    struct rte_ring_headtail prod __rte_cache_aligned;
    char pad1 __rte_cache_aligned;     /**< empty cache line */

    /** Ring consumer status. */
    struct rte_ring_headtail cons __rte_cache_aligned;
    char pad2 __rte_cache_aligned;     /**< empty cache line */
};
```
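Note that `head` and `tail` are free-running 32-bit counters; they are reduced modulo the ring size only when the slot array is accessed. A self-contained sketch (not DPDK code) of why unsigned wraparound is harmless:

```c
/* Illustrates the modulo-2^32 index trick: head/tail run freely over the
 * whole uint32_t range and are masked only on array access. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t size = 8, mask = size - 1;
    uint32_t cons_tail = 0xFFFFFFFEu;    /* about to wrap around */
    uint32_t prod_head = cons_tail + 5;  /* wraps to 3, which is fine */

    /* Entries in flight: unsigned subtraction stays correct across the wrap */
    uint32_t entries = prod_head - cons_tail;   /* == 5 */
    uint32_t slot    = (cons_tail + 2) & mask;  /* physical slot index == 0 */

    printf("entries=%u slot=%u\n", entries, slot);
    return 0;
}
```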
Enqueue:
(The material above is taken from http://reader.epubee.com/books/mobile/54/54aa973816d258a932e39464018932ee/text00032.html)
```c
static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
         unsigned int n, enum rte_ring_queue_behavior behavior,
         unsigned int is_sp, unsigned int *free_space)
{
    uint32_t prod_head, prod_next;
    uint32_t free_entries;

    n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
            &prod_head, &prod_next, &free_entries);
    if (n == 0)
        goto end;

    /* prod_head is the old r->prod.head. After __rte_ring_move_prod_head(),
     * r->prod.head has already been advanced to the desired position;
     * &r[1] is where the data array starts. */
    ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);

    update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
    if (free_space != NULL)
        *free_space = free_entries - n;
    return n;
}

static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
        unsigned int n, enum rte_ring_queue_behavior behavior,
        uint32_t *old_head, uint32_t *new_head,
        uint32_t *free_entries)
{
    const uint32_t capacity = r->capacity;
    unsigned int max = n;
    int success;

    do {
        /* Reset n to the initial burst count */
        n = max;

        *old_head = r->prod.head;

        /* add rmb barrier to avoid load/load reorder in weak
         * memory model. It is noop on x86
         */
        rte_smp_rmb();

        /*
         * The subtraction is done between two unsigned 32bits value
         * (the result is always modulo 32 bits even if we have
         * *old_head > cons_tail). So 'free_entries' is always between 0
         * and capacity (which is < size).
         *
         * Compute the currently free capacity: cons.tail <= prod.head, so
         * r->cons.tail - *old_head yields (modulo 2^32) the negated count
         * of in-flight entries; adding it to capacity gives the remaining
         * room.
         */
        *free_entries = (capacity + r->cons.tail - *old_head);

        /* check that we have enough room in ring */
        if (unlikely(n > *free_entries))
            n = (behavior == RTE_RING_QUEUE_FIXED) ?
                    0 : *free_entries;

        if (n == 0)
            return 0;

        *new_head = *old_head + n; /* position of the new head */
        if (is_sp) {
            /* single producer: just update r->prod.head directly,
             * no locking needed */
            r->prod.head = *new_head, success = 1;
        } else {
            /* multiple producers: use cmpset. If r->prod.head == *old_head,
             * set r->prod.head = *new_head; otherwise loop again, reloading
             * *old_head = r->prod.head, until it succeeds. */
            success = rte_atomic32_cmpset(&r->prod.head,
                    *old_head, *new_head);
        }
    } while (unlikely(success == 0));
    return n;
}
```
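`update_tail()` is what publishes the reserved slots to consumers. For reference, in DPDK's rte_ring implementation of this era it looks roughly like the following: on the multi-producer/consumer path it spins until every earlier enqueuer (or dequeuer) has published its own tail, so tails always advance in reservation order:

```c
static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
        uint32_t single, uint32_t enqueue)
{
    if (enqueue)
        rte_smp_wmb(); /* make the stored objects visible before the tail moves */
    else
        rte_smp_rmb();

    /*
     * If there are other enqueues/dequeues in progress that preceded us,
     * we need to wait for them to complete.
     */
    if (!single)
        while (unlikely(ht->tail != old_val))
            rte_pause();

    ht->tail = new_val;
}
```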
Dequeue:
The logic mirrors the enqueue path and the code is very similar, so it is not analyzed in detail.
```c
static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
         unsigned int n, enum rte_ring_queue_behavior behavior,
         unsigned int is_sc, unsigned int *available)
{
    uint32_t cons_head, cons_next;
    uint32_t entries;

    n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
            &cons_head, &cons_next, &entries);
    if (n == 0)
        goto end;

    DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);

    update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
    if (available != NULL)
        *available = entries - n;
    return n;
}

static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
        unsigned int n, enum rte_ring_queue_behavior behavior,
        uint32_t *old_head, uint32_t *new_head,
        uint32_t *entries)
{
    unsigned int max = n;
    int success;

    /* move cons.head atomically */
    do {
        /* Restore n as it may change every loop */
        n = max;

        *old_head = r->cons.head;

        /* add rmb barrier to avoid load/load reorder in weak
         * memory model. It is noop on x86
         */
        rte_smp_rmb();

        /* The subtraction is done between two unsigned 32bits value
         * (the result is always modulo 32 bits even if we have
         * cons_head > prod_tail). So 'entries' is always between 0
         * and size(ring)-1.
         */
        *entries = (r->prod.tail - *old_head);

        /* Set the actual entries for dequeue */
        if (n > *entries)
            n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

        if (unlikely(n == 0))
            return 0;

        *new_head = *old_head + n;
        if (is_sc)
            r->cons.head = *new_head, success = 1;
        else
            success = rte_atomic32_cmpset(&r->cons.head,
                    *old_head, *new_head);
    } while (unlikely(success == 0));
    return n;
}
```
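`ENQUEUE_PTRS` and `DEQUEUE_PTRS`, used above but not shown, are unrolled copy macros. Stripped of the unrolling and the split at the wrap boundary, they reduce to masked copy loops; a simplified sketch with hypothetical helper names:

```c
#include <stdint.h>

/* Conceptual equivalent of ENQUEUE_PTRS: store n object pointers into the
 * ring slots starting at logical index prod_head. The real macro splits the
 * copy at the wrap point and unrolls it; this sketch does not. */
static inline void
enqueue_ptrs_sketch(void **slots, uint32_t mask, uint32_t prod_head,
                    void * const *obj_table, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++)
        slots[(prod_head + i) & mask] = obj_table[i];
}

/* Conceptual equivalent of DEQUEUE_PTRS: the mirror-image load loop. */
static inline void
dequeue_ptrs_sketch(void **slots, uint32_t mask, uint32_t cons_head,
                    void **obj_table, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++)
        obj_table[i] = slots[(cons_head + i) & mask];
}
```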
Ring code (a standalone ev_ring implementation adapted from rte_ring):
```c
/**
 * @file
 * Lock-free ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail pointers are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 * - Bulk dequeue.
 * - Bulk enqueue.
 *
 * Note: the ring implementation is not preemptable. A lcore must not
 * be interrupted by another task that uses the same ring.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "list.h"
#include "mring.h"

#define EV_RING_NAMESIZE 32 /**< The maximum length of a ring name. */

/**
 * A lock-free ring structure.
 *
 * The producer and the consumer have a head and a tail index. The
 * particularity of these indexes is that they are not between 0 and
 * size(ring). These indexes are between 0 and 2^32, and we mask their
 * value when we access the ring[] field. Thanks to this assumption, we
 * can do subtractions between two index values in a modulo-32bit base:
 * that's why the overflow of the indexes is not a problem.
 */
struct ev_ring {
    struct list_head list;       /**< Next in list. */
    char name[EV_RING_NAMESIZE]; /**< Name of the ring. */
    int32_t flags;               /**< Flags supplied at creation. */

    /** Ring producer status. */
    struct prod {
        volatile uint32_t bulk_default; /**< Default bulk count. */
        uint32_t watermark;      /**< Maximum items before EDQUOT. */
        uint32_t sp_enqueue;     /**< True, if single producer. */
        uint32_t use_nop;        /**< True if waiting with a null loop; otherwise yield. */
        uint32_t size;           /**< Size of ring. */
        uint32_t mask;           /**< Mask (size-1) of ring. */
        volatile uint32_t head;  /**< Producer head. */
        volatile uint32_t tail;  /**< Producer tail. */
    } prod __ev_arch_cache_aligned;

    /** Ring consumer status. */
    struct cons {
        volatile uint32_t bulk_default; /**< Default bulk count. */
        uint32_t sc_dequeue;     /**< True, if single consumer. */
        uint32_t use_nop;        /**< True if waiting with a null loop; otherwise yield. */
        uint32_t size;           /**< Size of the ring. */
        uint32_t mask;           /**< Mask (size-1) of ring. */
        volatile uint32_t head;  /**< Consumer head. */
        volatile uint32_t tail;  /**< Consumer tail. */
    } cons __ev_arch_cache_aligned;

    void * volatile ring[0] __ev_arch_cache_aligned;
                             /**< Memory space of ring starts here. */
};

/**
 * Create a new ring named *name* in memory.
 *
 * This function allocates the ring with malloc(). Its size is set to
 * *count*, which must be a power of two. Water marking is disabled by
 * default. The default bulk count is initialized to 1. Note that the
 * real usable ring size is *count-1* instead of *count*.
 *
 * @param name
 *   The name of the ring.
 * @param count
 *   The size of the ring (must be a power of 2).
 * @param flags
 *   An OR of the following:
 *   - EV_RING_F_SP_EN: If this flag is set, the default behavior when
 *     using ``ev_ring_enqueue()`` or ``ev_ring_enqueue_bulk()``
 *     is "single-producer". Otherwise, it is "multi-producers".
 *   - EV_RING_F_SC_EN: If this flag is set, the default behavior when
 *     using ``ev_ring_dequeue()`` or ``ev_ring_dequeue_bulk()``
 *     is "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   On success, the pointer to the newly allocated ring; NULL on error
 *   (count is not a power of 2, or the allocation failed).
 */
ev_ring_st *ev_ring_create(const char *name, uint32_t count, uint32_t flags);

/**
 * Set the default bulk count for enqueue/dequeue.
 *
 * The parameter *count* is the default number of bulk elements to
 * get/put when using ``ev_ring_*_{en,de}queue_bulk()``. It must be
 * greater than 0 and less than the ring size.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param count
 *   The new default bulk count.
 * @return
 *   - 0: Success; default_bulk_count changed.
 *   - -EINVAL: Invalid count value.
 */
static inline int32_t
ev_ring_set_bulk_count(ev_ring_st *r, uint32_t count)
{
    if (unlikely(count == 0 || count >= r->prod.size))
        return -EINVAL;

    r->prod.bulk_default = count;
    r->cons.bulk_default = count;
    return 0;
}

/**
 * Get the default bulk count for enqueue/dequeue.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The default bulk count for enqueue/dequeue.
 */
static inline uint32_t
ev_ring_get_bulk_count(ev_ring_st *r)
{
    return r->prod.bulk_default;
}

/**
 * Change the high water mark.
 *
 * If *count* is 0, water marking is disabled. Otherwise, it is set to the
 * *count* value. The *count* value must be greater than 0 and less
 * than the ring size.
 *
 * This function can be called at any time (not necessarily at
 * initialization).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param count
 *   The new water mark value.
 * @return
 *   - 0: Success; water mark changed.
 *   - -EINVAL: Invalid water mark value.
 */
int32_t ev_ring_set_water_mark(ev_ring_st *r, uint32_t count);

/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table. The
 *   value must be strictly positive.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int32_t
ev_ring_mp_enqueue_bulk(ev_ring_st *r, void * const *obj_table, uint32_t n)
{
    uint32_t prod_head, prod_next;
    uint32_t cons_tail, free_entries;
    int32_t success;
    uint32_t i;
    uint32_t mask = r->prod.mask;
    int32_t ret;

    /* move prod.head atomically */
    do {
        prod_head = r->prod.head;
        cons_tail = r->cons.tail;
        /* The subtraction is done between two unsigned 32-bit values
         * (the result is always modulo 32 bits even if we have
         * prod_head > cons_tail). So 'free_entries' is always between 0
         * and size(ring)-1. */
        free_entries = (mask + cons_tail - prod_head);

        /* check that we have enough room in ring */
        if (unlikely(n > free_entries))
            return -ENOBUFS;

        prod_next = prod_head + n;
        success = ev_atomic32_cmpset(&r->prod.head, prod_head, prod_next);
    } while (unlikely(success == 0));

    /* write entries in ring */
    for (i = 0; likely(i < n); i++)
        r->ring[(prod_head + i) & mask] = obj_table[i];
    ev_wmb();

    /* return -EDQUOT if we exceed the watermark */
    if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark))
        ret = -EDQUOT;
    else
        ret = 0;

    /*
     * If there are other enqueues in progress that preceded us,
     * we need to wait for them to complete.
     */
    if (r->prod.use_nop) {
        while (unlikely(r->prod.tail != prod_head))
            ev_nop();
    } else {
        while (unlikely(r->prod.tail != prod_head))
            ev_yield();
    }

    r->prod.tail = prod_next;
    return ret;
}

/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table. The
 *   value must be strictly positive.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int32_t
ev_ring_sp_enqueue_bulk(ev_ring_st *r, void * const *obj_table, uint32_t n)
{
    uint32_t prod_head, cons_tail;
    uint32_t prod_next, free_entries;
    uint32_t i;
    uint32_t mask = r->prod.mask;
    int32_t ret;

    prod_head = r->prod.head;
    cons_tail = r->cons.tail;
    /* same modulo-2^32 subtraction as in the MP variant */
    free_entries = mask + cons_tail - prod_head;

    /* check that we have enough room in ring */
    if (unlikely(n > free_entries))
        return -ENOBUFS;

    prod_next = prod_head + n;
    r->prod.head = prod_next;

    /* write entries in ring */
    for (i = 0; likely(i < n); i++)
        r->ring[(prod_head + i) & mask] = obj_table[i];
    ev_wmb();

    /* return -EDQUOT if we exceed the watermark */
    if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark))
        ret = -EDQUOT;
    else
        ret = 0;

    r->prod.tail = prod_next;
    return ret;
}

/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
int32_t ev_ring_enqueue_bulk(ev_ring_st *r, void * const *obj_table, uint32_t n)
{
    if (r->prod.sp_enqueue)
        return ev_ring_sp_enqueue_bulk(r, obj_table, n);
    else
        return ev_ring_mp_enqueue_bulk(r, obj_table, n);
}

/**
 * Enqueue one object on a ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int32_t
ev_ring_mp_enqueue(ev_ring_st *r, void *obj)
{
    return ev_ring_mp_enqueue_bulk(r, &obj, 1);
}

/**
 * Enqueue one object on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int32_t
ev_ring_sp_enqueue(ev_ring_st *r, void *obj)
{
    return ev_ring_sp_enqueue_bulk(r, &obj, 1);
}

/**
 * Enqueue one object on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
int32_t ev_ring_enqueue(ev_ring_st *r, void *obj)
{
    if (r->prod.sp_enqueue)
        return ev_ring_sp_enqueue(r, obj);
    else
        return ev_ring_mp_enqueue(r, obj);
}

/**
 * Dequeue several objects from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table;
 *   must be strictly positive.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int32_t
ev_ring_mc_dequeue_bulk(ev_ring_st *r, void **obj_table, uint32_t n)
{
    uint32_t cons_head, prod_tail;
    uint32_t cons_next, entries;
    int32_t success;
    uint32_t i;
    uint32_t mask = r->prod.mask;

    /* move cons.head atomically */
    do {
        cons_head = r->cons.head;
        prod_tail = r->prod.tail;
        /* The subtraction is done between two unsigned 32-bit values
         * (the result is always modulo 32 bits even if we have
         * cons_head > prod_tail). So 'entries' is always between 0
         * and size(ring)-1. */
        entries = (prod_tail - cons_head);

        /* check that we have enough entries in ring */
        if (unlikely(n > entries))
            return -ENOENT;

        cons_next = cons_head + n;
        success = ev_atomic32_cmpset(&r->cons.head, cons_head, cons_next);
    } while (unlikely(success == 0));

    /* copy in table */
    ev_rmb();
    for (i = 0; likely(i < n); i++)
        obj_table[i] = r->ring[(cons_head + i) & mask];

    /*
     * If there are other dequeues in progress that preceded us,
     * we need to wait for them to complete.
     */
    if (r->cons.use_nop) {
        while (unlikely(r->cons.tail != cons_head))
            ev_nop();
    } else {
        while (unlikely(r->cons.tail != cons_head))
            ev_yield();
    }

    r->cons.tail = cons_next;
    return 0;
}

/**
 * Dequeue several objects from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table;
 *   must be strictly positive.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static int32_t
ev_ring_sc_dequeue_bulk(ev_ring_st *r, void **obj_table, uint32_t n)
{
    uint32_t cons_head, prod_tail;
    uint32_t cons_next, entries;
    uint32_t i;
    uint32_t mask = r->prod.mask;

    cons_head = r->cons.head;
    prod_tail = r->prod.tail;
    /* same modulo-2^32 subtraction as in the MC variant */
    entries = prod_tail - cons_head;

    /* check that we have enough entries in ring */
    if (unlikely(n > entries))
        return -ENOENT;

    cons_next = cons_head + n;
    r->cons.head = cons_next;

    /* copy in table */
    ev_rmb();
    for (i = 0; likely(i < n); i++)
        obj_table[i] = r->ring[(cons_head + i) & mask];

    r->cons.tail = cons_next;
    return 0;
}

/**
 * Dequeue several objects from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
int32_t ev_ring_dequeue_bulk(ev_ring_st *r, void **obj_table, uint32_t n)
{
    if (r->cons.sc_dequeue)
        return ev_ring_sc_dequeue_bulk(r, obj_table, n);
    else
        return ev_ring_mc_dequeue_bulk(r, obj_table, n);
}

/**
 * Dequeue one object from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static int32_t
ev_ring_mc_dequeue(ev_ring_st *r, void **obj_p)
{
    return ev_ring_mc_dequeue_bulk(r, obj_p, 1);
}

/**
 * Dequeue one object from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static int32_t
ev_ring_sc_dequeue(ev_ring_st *r, void **obj_p)
{
    return ev_ring_sc_dequeue_bulk(r, obj_p, 1);
}

/**
 * Dequeue one object from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
int32_t ev_ring_dequeue(ev_ring_st *r, void **obj_p)
{
    if (r->cons.sc_dequeue)
        return ev_ring_sc_dequeue(r, obj_p);
    else
        return ev_ring_mc_dequeue(r, obj_p);
}

/**
 * Test if a ring is full.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is full.
 *   - 0: The ring is not full.
 */
int32_t ev_ring_full(const ev_ring_st *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
}

/**
 * Test if a ring is empty.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is empty.
 *   - 0: The ring is not empty.
 */
int32_t ev_ring_empty(const ev_ring_st *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return !!(cons_tail == prod_tail);
}

/**
 * Return the number of entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of entries in the ring.
 */
uint32_t ev_ring_count(const ev_ring_st *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return ((prod_tail - cons_tail) & r->prod.mask);
}

/**
 * Return the number of free entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of free entries in the ring.
 */
uint32_t ev_ring_free_count(const ev_ring_st *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return ((cons_tail - prod_tail - 1) & r->prod.mask);
}

/**
 * Search for a ring by its name.
 *
 * @param name
 *   The name of the ring.
 * @return
 *   The pointer to the ring matching the name, or NULL if not found.
 */
ev_ring_st *ev_ring_lookup(const char *name);

/* global list of rings (used for debug/dump) */
//static struct list_head ring_list = {NULL, NULL};
// Debug/dump support will be implemented later; keeping the list here
// as-is would not be thread-safe.

/* create the ring */
ev_ring_st *
ev_ring_create(const char *name, uint32_t count, uint32_t flags)
{
    ev_ring_st *r = NULL;
    uint32_t ring_size = 0;

    if (count == 0)
        return NULL;

    /* count must be a power of 2 */
    if (!POWEROF2(count))
        return NULL;

    ring_size = count * sizeof(void *) + sizeof(ev_ring_st);

    r = (ev_ring_st *)malloc(ring_size);
    if (r == NULL)
        return NULL;

    /* init the ring structure */
    memset(r, 0, sizeof(*r));
    snprintf(r->name, EV_RING_NAMESIZE, "%s", name);
    r->flags = flags;
    r->prod.bulk_default = 1;
    r->cons.bulk_default = 1;
    r->prod.watermark = count;
    r->prod.sp_enqueue = !!(flags & EV_RING_F_SP_EN);
    r->prod.use_nop = !!(flags & EV_RING_F_MORE_CORE);
    r->cons.sc_dequeue = !!(flags & EV_RING_F_SC_EN);
    r->cons.use_nop = !!(flags & EV_RING_F_MORE_CORE);
    r->prod.size = count;
    r->cons.size = count;
    r->prod.mask = count - 1;
    r->cons.mask = count - 1;
    r->prod.head = 0;
    r->cons.head = 0;
    r->prod.tail = 0;
    r->cons.tail = 0;

    //list_add_tail(&r->list, &ring_list);

    return r;
}

void ev_ring_destroy(ev_ring_st *ring)
{
    free(ring);
}

/*
 * change the high water mark. If *count* is 0, water marking is
 * disabled.
 */
int32_t ev_ring_set_water_mark(ev_ring_st *r, uint32_t count)
{
    if (count >= r->prod.size)
        return -EINVAL;

    /* if count is 0, disable the watermarking */
    if (count == 0)
        count = r->prod.size;

    r->prod.watermark = count;
    return 0;
}
```
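A minimal usage sketch for the ev_ring API above. This is a hypothetical test harness: it assumes mring.h supplies the `ev_ring_st` typedef, the `EV_RING_F_*` flags, and the prototypes shown earlier.

```c
#include <stdio.h>
#include "mring.h"

int main(void)
{
    /* single-producer, single-consumer ring with 8 slots (7 usable) */
    ev_ring_st *r = ev_ring_create("test", 8,
                                   EV_RING_F_SP_EN | EV_RING_F_SC_EN);
    if (r == NULL)
        return -1;

    int value = 42;
    void *out = NULL;

    if (ev_ring_enqueue(r, &value) != 0)  /* takes the SP path */
        printf("ring full\n");

    if (ev_ring_dequeue(r, &out) == 0)    /* takes the SC path */
        printf("dequeued %d\n", *(int *)out);

    ev_ring_destroy(r);
    return 0;
}
```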
Do good deeds, and don't fret about what lies ahead.
-- a fatty who stands 180 and weighs 180