Discussion:
[dpdk-dev] [PATCH v1 01/14] ring: remove split cacheline build setting
Bruce Richardson
2017-02-23 17:23:54 UTC
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved performance,
use 128B rather than 64B alignment, since it keeps the producer and consumer
data from sitting on adjacent cachelines.
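
As a rough illustration (not part of the patch), the resulting layout is
sketched below, using a plain GCC-style aligned attribute in place of DPDK's
__rte_aligned() macro:

/* Minimal sketch of the intended layout: both head/tail blocks are
 * aligned to 128B (two cachelines on x86), so the hardware adjacent-
 * cacheline prefetcher does not drag the consumer's line onto the
 * producer's core and vice versa. */
#include <stdint.h>
#include <stddef.h>

struct demo_ring {
        char name[32];
        struct {
                volatile uint32_t head;
                volatile uint32_t tail;
        } prod __attribute__((aligned(128)));
        struct {
                volatile uint32_t head;
                volatile uint32_t tail;
        } cons __attribute__((aligned(128)));
};

/* prod and cons each start on their own 128B boundary */
_Static_assert(offsetof(struct demo_ring, prod) % 128 == 0, "prod");
_Static_assert(offsetof(struct demo_ring, cons) % 128 == 0, "cons");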

Signed-off-by: Bruce Richardson <***@intel.com>
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================

+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+ have been made to it:
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);

/** Ring consumer status. */
struct cons {
@@ -177,11 +177,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Consumer head. */
volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
+ } cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
Bruce Richardson
2017-02-23 17:23:55 UTC
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.
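
For illustration only (the helper below is hypothetical, not from this patch),
sharing one structure type for both ends means common code can operate on
either side of the ring through a single type:

#include <stdint.h>

/* same shape as the head/tail structure introduced by this patch */
struct ht_demo {
        volatile uint32_t head;
        volatile uint32_t tail;
};

/* hypothetical helper: entries between head and tail, usable for
 * either the producer or the consumer side of the ring */
static inline uint32_t
ht_demo_used(const struct ht_demo *ht)
{
        return ht->head - ht->tail; /* unsigned wrap-around is intended */
}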

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 04fe667..0c8defd 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,6 +139,19 @@ struct rte_ring_debug_stats {

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_ht_ptr {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */
+};
+
/**
* An RTE ring structure.
*
@@ -161,23 +174,10 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */

/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
+ struct rte_ring_ht_ptr prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);

/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
- } cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
+ struct rte_ring_ht_ptr cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
Olivier Matz
2017-03-01 10:22:43 UTC
On Thu, 23 Feb 2017 17:23:55 +0000, Bruce Richardson
Post by Bruce Richardson
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.
---
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 04fe667..0c8defd 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_ht_ptr {
Just wondering if we can find a better name for this structure. I'm not
sure '_ptr' is really relevant. What do you think of:

rte_ring_endpoint
rte_ring_ht
rte_ring_headtail



Olivier
Bruce Richardson
2017-03-01 10:33:31 UTC
Post by Olivier Matz
On Thu, 23 Feb 2017 17:23:55 +0000, Bruce Richardson
Post by Bruce Richardson
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.
---
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 04fe667..0c8defd 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_ht_ptr {
Just wondering if we can find a better name for this structure. I'm not
sure '_ptr' is really relevant. What do you think of:
rte_ring_endpoint
rte_ring_ht
rte_ring_headtail
I'll use one of the latter two in next version.

/Bruce
Bruce Richardson
2017-02-23 17:23:56 UTC
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of those structures and into the
top-level structure so they are not duplicated.
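
As a side note, and only as a sketch (not code from this patch): because the
ring size is a power of two, one mask at the top level serves both paths,
since the producer and the consumer index the ring in the same way:

#include <stdint.h>

/* mask == size - 1, so masking wraps a free-running index into a valid
 * slot; enqueue and dequeue both index this way, which is why a single
 * size/mask pair at the top level is enough */
static inline uint32_t
ring_slot(uint32_t index, uint32_t mask)
{
        return index & mask;
}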

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/test/test_ring.c | 6 +++---
lib/librte_ring/rte_ring.c | 20 ++++++++++----------
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index ebcb896..5f09097 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -148,7 +148,7 @@ check_live_watermark_change(__attribute__((unused)) void *dummy)
}

/* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->prod.watermark;
+ watermark = r->watermark;
if (watermark != watermark_old &&
(watermark_old != 16 || watermark != 32)) {
printf("Bad watermark change %u -> %u\n", watermark_old,
@@ -213,7 +213,7 @@ test_set_watermark( void ){
printf( " ring lookup failed\n" );
goto error;
}
- count = r->prod.size*2;
+ count = r->size * 2;
setwm = rte_ring_set_water_mark(r, count);
if (setwm != -EINVAL){
printf("Test failed to detect invalid watermark count value\n");
@@ -222,7 +222,7 @@ test_set_watermark( void ){

count = 0;
rte_ring_set_water_mark(r, count);
- if (r->prod.watermark != r->prod.size) {
+ if (r->watermark != r->size) {
printf("Test failed to detect invalid watermark count value\n");
goto error;
}
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 4bc6da1..80fc356 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -144,11 +144,11 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.watermark = count;
+ r->watermark = count;
r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
- r->prod.size = r->cons.size = count;
- r->prod.mask = r->cons.mask = count-1;
+ r->size = count;
+ r->mask = count - 1;
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;

@@ -269,14 +269,14 @@ rte_ring_free(struct rte_ring *r)
int
rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
{
- if (count >= r->prod.size)
+ if (count >= r->size)
return -EINVAL;

/* if count is 0, disable the watermarking */
if (count == 0)
- count = r->prod.size;
+ count = r->size;

- r->prod.watermark = count;
+ r->watermark = count;
return 0;
}

@@ -291,17 +291,17 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)

fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->prod.watermark == r->prod.size)
+ if (r->watermark == r->size)
fprintf(f, " watermark=0\n");
else
- fprintf(f, " watermark=%"PRIu32"\n", r->prod.watermark);
+ fprintf(f, " watermark=%"PRIu32"\n", r->watermark);

/* sum and dump statistics */
#ifdef RTE_LIBRTE_RING_DEBUG
@@ -318,7 +318,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
}
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 0c8defd..6e75c15 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -143,13 +143,10 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
struct rte_ring_ht_ptr {
volatile uint32_t head; /**< Prod/consumer head. */
volatile uint32_t tail; /**< Prod/consumer tail. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
union {
uint32_t sp_enqueue; /**< True, if single producer. */
uint32_t sc_dequeue; /**< True, if single consumer. */
};
- uint32_t watermark; /**< Max items before EDQUOT in producer. */
};

/**
@@ -169,9 +166,12 @@ struct rte_ring {
* next time the ABI changes
*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_ht_ptr prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
@@ -350,7 +350,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
+ const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
@@ -377,7 +377,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+ const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
obj_table[i] = r->ring[idx]; \
@@ -432,7 +432,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -480,7 +480,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -539,7 +539,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

prod_head = r->prod.head;
@@ -575,7 +575,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -625,7 +625,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -722,7 +722,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

cons_head = r->cons.head;
prod_tail = r->prod.tail;
@@ -1051,7 +1051,7 @@ rte_ring_full(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}

/**
@@ -1084,7 +1084,7 @@ rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}

/**
@@ -1100,7 +1100,7 @@ rte_ring_free_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
}

/**
@@ -1114,7 +1114,7 @@ rte_ring_free_count(const struct rte_ring *r)
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
- return r->prod.size;
+ return r->size;
}

/**
--
2.9.3
Bruce Richardson
2017-02-23 17:23:57 UTC
The debug option only provided statistics to the user, most of
which could be tracked by the application itself. Remove both the
compile-time option and the feature, simplifying the code.
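
For illustration, an application that still wants such counters after this
change could wrap the enqueue calls itself; the wrapper and field names below
are made up, not part of any DPDK API:

#include <stdint.h>
#include <rte_ring.h>

struct app_ring_stats {
        uint64_t enq_ok_bulk;   /* successful bulk enqueues */
        uint64_t enq_ok_objs;   /* objects enqueued */
        uint64_t enq_fail_bulk; /* failed bulk enqueues */
        uint64_t enq_fail_objs; /* objects not enqueued */
};

static inline int
app_enqueue_bulk(struct rte_ring *r, void * const *objs, unsigned int n,
                 struct app_ring_stats *st)
{
        /* at this point in the series the bulk API still returns
         * 0 on success and -ENOBUFS on failure */
        int ret = rte_ring_sp_enqueue_bulk(r, objs, n);

        if (ret == 0) {
                st->enq_ok_bulk++;
                st->enq_ok_objs += n;
        } else {
                st->enq_fail_bulk++;
                st->enq_fail_objs += n;
        }
        return ret;
}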

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/test/test_ring.c | 410 ---------------------------------
config/common_base | 1 -
doc/guides/prog_guide/ring_lib.rst | 7 -
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.c | 41 ----
lib/librte_ring/rte_ring.h | 97 +-------
6 files changed, 13 insertions(+), 544 deletions(-)

diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 5f09097..3891f5d 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
return -1;
}

-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
- printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
- return 0;
-#else
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
- unsigned num_items = 0;
- unsigned failed_enqueue_ops = 0;
- unsigned failed_enqueue_items = 0;
- unsigned failed_dequeue_ops = 0;
- unsigned failed_dequeue_items = 0;
- unsigned last_enqueue_ops = 0;
- unsigned last_enqueue_items = 0;
- unsigned last_quota_ops = 0;
- unsigned last_quota_items = 0;
- unsigned lcore_id = rte_lcore_id();
- struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
- printf("Test the ring stats.\n");
-
- /* Reset the watermark in case it was set in another test. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Allocate some dummy object pointers. */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
-
- /* Allocate some memory for copied objects. */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
- /* Set the head and tail pointers. */
- cur_src = src;
- cur_dst = dst;
-
- /* Do Enqueue tests. */
- printf("Test the dequeue stats.\n");
-
- /* Fill the ring up to RING_SIZE -1. */
- printf("Fill the ring.\n");
- for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
- rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- }
-
- /* Adjust for final enqueue = MAX_BULK -1. */
- cur_src--;
-
- printf("Verify that the ring is full.\n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
-
- printf("Verify the enqueue success stats.\n");
- /* Stats should match above enqueue operations to fill the ring. */
- if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Current max objects is RING_SIZE -1. */
- if (ring_stats->enq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any failures yet. */
- if (ring_stats->enq_fail_bulk != 0)
- goto fail;
- if (ring_stats->enq_fail_objs != 0)
- goto fail;
-
-
- printf("Test stats for SP burst enqueue to a full ring.\n");
- num_items = 2;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for SP bulk enqueue to a full ring.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP burst enqueue to a full ring.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP bulk enqueue to a full ring.\n");
- num_items = 16;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- /* Do Dequeue tests. */
- printf("Test the dequeue stats.\n");
-
- printf("Empty the ring.\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* There was only RING_SIZE -1 objects to dequeue. */
- cur_dst++;
-
- printf("Verify ring is empty.\n");
- if (1 != rte_ring_empty(r))
- goto fail;
-
- printf("Verify the dequeue success stats.\n");
- /* Stats should match above dequeue operations. */
- if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Objects dequeued is RING_SIZE -1. */
- if (ring_stats->deq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any dequeue failure stats yet. */
- if (ring_stats->deq_fail_bulk != 0)
- goto fail;
-
- printf("Test stats for SC burst dequeue with an empty ring.\n");
- num_items = 2;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for SC bulk dequeue with an empty ring.\n");
- num_items = 4;
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC burst dequeue with an empty ring.\n");
- num_items = 8;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC bulk dequeue with an empty ring.\n");
- num_items = 16;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test total enqueue/dequeue stats.\n");
- /* At this point the enqueue and dequeue stats should be the same. */
- if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
- goto fail;
- if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
- goto fail;
- if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
- goto fail;
- if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
- goto fail;
-
-
- /* Watermark Tests. */
- printf("Test the watermark/quota stats.\n");
-
- printf("Verify the initial watermark stats.\n");
- /* Watermark stats should be 0 since there is no watermark. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Set a watermark. */
- rte_ring_set_water_mark(r, 16);
-
- /* Reset pointers. */
- cur_src = src;
- cur_dst = dst;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue below watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should still be 0. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Success stats should have increased. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
- goto fail;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue at watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != 1)
- goto fail;
- if (ring_stats->enq_quota_objs != num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP burst enqueue above watermark.\n");
- num_items = 1;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP burst enqueue above watermark.\n");
- num_items = 2;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP bulk enqueue above watermark.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP bulk enqueue above watermark.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- printf("Test watermark success stats.\n");
- /* Success stats should be same as last non-watermarked enqueue. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items)
- goto fail;
-
-
- /* Cleanup. */
-
- /* Empty the ring. */
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* Reset the watermark. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
-fail:
- free(src);
- free(dst);
- return -1;
-#endif
-}
-
/*
* it will always fail to create ring with a wrong ring size number in this function
*/
@@ -1335,10 +929,6 @@ test_ring(void)
if (test_ring_basic() < 0)
return -1;

- /* ring stats */
- if (test_ring_stats() < 0)
- return -1;
-
/* basic operations */
if (test_live_watermark_change() < 0)
return -1;
diff --git a/config/common_base b/config/common_base
index 099ffda..b3d8272 100644
--- a/config/common_base
+++ b/config/common_base
@@ -447,7 +447,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f69753..d4ab502 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,

This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.

-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index ea45e0c..e0ebd71 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -116,6 +116,7 @@ API Changes
have been made to it:

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+ * removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 80fc356..90ee63f 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif

/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats sum;
- unsigned lcore_id;
-#endif
-
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " watermark=0\n");
else
fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
-
- /* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
- sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
- sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
- sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
- sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
- sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
- sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
- sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
- sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
- sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
- }
- fprintf(f, " size=%"PRIu32"\n", r->size);
- fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
- fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
- fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
- fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
- fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
- fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
- fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
- fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
- fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
- fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
- fprintf(f, " no statistics available\n");
-#endif
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 6e75c15..814f593 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};

-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -179,10 +161,6 @@ struct rte_ring {
/** Ring consumer status. */
struct rte_ring_ht_ptr cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);

-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
* about compiler re-ordering */
@@ -194,27 +172,6 @@ struct rte_ring {
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
-/**
* Calculate the memory size needed for a ring
*
* This function returns the number of bytes needed for a ring, given
@@ -455,17 +412,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -480,15 +432,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

/*
* If there are other enqueues in progress that preceded us,
@@ -552,17 +500,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -575,15 +518,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

r->prod.tail = prod_next;
return ret;
@@ -647,16 +586,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

/* Set the actual entries for dequeue */
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -686,7 +620,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
sched_yield();
}
}
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -733,16 +666,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = prod_tail - cons_head;

if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -754,7 +682,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
--
2.9.3
Bruce Richardson
2017-02-23 17:23:58 UTC
There was a compile-time setting to enable a ring to yield when
it entered a loop in mp or mc rings waiting for the tail pointer update.
Build-time settings are not recommended for enabling/disabling features,
and since this was off by default, remove it completely. If needed, a
runtime-enabled equivalent can be used.
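
For reference, the removed behaviour was roughly the pattern below; a
runtime-configurable equivalent could keep the same structure with a variable
threshold instead of a build-time constant. The helper names are illustrative
and cpu_pause() is a local stand-in for DPDK's rte_pause():

#include <sched.h>
#include <stdint.h>

#if defined(__x86_64__) || defined(__i386__)
#include <immintrin.h>
#define cpu_pause() _mm_pause()   /* stand-in for rte_pause() */
#else
#define cpu_pause() do { } while (0)
#endif

/* set at runtime instead of via CONFIG_RTE_RING_PAUSE_REP_COUNT;
 * 0 means never call sched_yield(), matching the old default */
static unsigned int pause_rep_count;

static inline void
wait_for_tail(volatile const uint32_t *tail, uint32_t expected)
{
        unsigned int rep = 0;

        while (*tail != expected) {
                cpu_pause();
                if (pause_rep_count && ++rep == pause_rep_count) {
                        rep = 0;
                        sched_yield(); /* let a preempted thread run */
                }
        }
}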

Signed-off-by: Bruce Richardson <***@intel.com>
---
config/common_base | 1 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 ----
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.h | 35 +++++--------------------
4 files changed, 7 insertions(+), 35 deletions(-)

diff --git a/config/common_base b/config/common_base
index b3d8272..d5beadd 100644
--- a/config/common_base
+++ b/config/common_base
@@ -447,7 +447,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
# Compile librte_mempool
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 10a10a8..7c39cd2 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -352,11 +352,6 @@ Known Issues

3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.

- ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
- It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
- This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
+ rte_timer

Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e0ebd71..c69ca8f 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -117,6 +117,7 @@ API Changes

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
+ * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 814f593..0f95c84 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -114,11 +114,6 @@ enum rte_ring_queue_behavior {
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)

-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
-#endif
-
struct rte_memzone; /* forward declaration, so as not to require memzone.h */

/* structure to hold a pair of head/tail values and other metadata */
@@ -388,7 +383,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -442,18 +437,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head)) {
+ while (unlikely(r->prod.tail != prod_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->prod.tail = prod_next;
return ret;
}
@@ -486,7 +472,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -563,7 +549,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -608,18 +594,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head)) {
+ while (unlikely(r->cons.tail != cons_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -654,7 +631,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-02-23 17:23:59 UTC
Remove the watermark support. A future commit will add support for having
enqueue functions return the amount of free space in the ring, which will
allow applications to implement their own watermark checks, while also
being more generally useful to the application.
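
As a sketch of the application-side alternative (helper and callback names are
illustrative, not part of any API), a high-water check can be layered on top
of the existing calls; this version uses rte_ring_count() rather than the
free-space return value that a later patch in this series introduces:

#include <rte_ring.h>

/* hypothetical application callback, invoked when the ring fills
 * beyond the application's chosen high-water mark */
extern void app_backpressure(struct rte_ring *r);

static inline int
app_enqueue_with_wm(struct rte_ring *r, void * const *objs,
                    unsigned int n, unsigned int high_wm)
{
        /* after this patch, bulk enqueue returns 0 or -ENOBUFS only */
        int ret = rte_ring_enqueue_bulk(r, objs, n);

        if (ret == 0 && rte_ring_count(r) >= high_wm)
                app_backpressure(r);
        return ret;
}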

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/test/commands.c | 52 ------------
app/test/test_ring.c | 149 +--------------------------------
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
6 files changed, 8 insertions(+), 278 deletions(-)

diff --git a/app/test/commands.c b/app/test/commands.c
index 2df46b0..551c81d 100644
--- a/app/test/commands.c
+++ b/app/test/commands.c
@@ -228,57 +228,6 @@ cmdline_parse_inst_t cmd_dump_one = {

/****************/

-struct cmd_set_ring_result {
- cmdline_fixed_string_t set;
- cmdline_fixed_string_t name;
- uint32_t value;
-};
-
-static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_ring_result *res = parsed_result;
- struct rte_ring *r;
- int ret;
-
- r = rte_ring_lookup(res->name);
- if (r == NULL) {
- cmdline_printf(cl, "Cannot find ring\n");
- return;
- }
-
- if (!strcmp(res->set, "set_watermark")) {
- ret = rte_ring_set_water_mark(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set water mark\n");
- }
-}
-
-cmdline_parse_token_string_t cmd_set_ring_set =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_watermark");
-
-cmdline_parse_token_string_t cmd_set_ring_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
-
-cmdline_parse_token_num_t cmd_set_ring_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);
-
-cmdline_parse_inst_t cmd_set_ring = {
- .f = cmd_set_ring_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "set watermark: "
- "set_watermark <ring_name> <value>",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_set_ring_set,
- (void *)&cmd_set_ring_name,
- (void *)&cmd_set_ring_value,
- NULL,
- },
-};
-
-/****************/
-
struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
@@ -419,7 +368,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_autotest,
(cmdline_parse_inst_t *)&cmd_dump,
(cmdline_parse_inst_t *)&cmd_dump_one,
- (cmdline_parse_inst_t *)&cmd_set_ring,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_set_rxtx,
(cmdline_parse_inst_t *)&cmd_set_rxtx_anchor,
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 3891f5d..666a451 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -78,21 +78,6 @@
* - Dequeue one object, two objects, MAX_BULK objects
* - Check that dequeued pointers are correct
*
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check live watermark change
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of watermark.
- * - At the same time, change the watermark on the master lcore.
- * - The slave lcore will check that watermark changes from 16 to 32.
- *
* #. Performance tests.
*
* Tests done in test_ring_perf.c
@@ -115,123 +100,6 @@ static struct rte_ring *r;

#define TEST_RING_FULL_EMTPY_ITER 8

-static int
-check_live_watermark_change(__attribute__((unused)) void *dummy)
-{
- uint64_t hz = rte_get_timer_hz();
- void *obj_table[MAX_BULK];
- unsigned watermark, watermark_old = 16;
- uint64_t cur_time, end_time;
- int64_t diff = 0;
- int i, ret;
- unsigned count = 4;
-
- /* init the object table */
- memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz / 4);
-
- /* check that bulk and watermark are 4 and 32 (respectively) */
- while (diff >= 0) {
-
- /* add in ring until we reach watermark */
- ret = 0;
- for (i = 0; i < 16; i ++) {
- if (ret != 0)
- break;
- ret = rte_ring_enqueue_bulk(r, obj_table, count);
- }
-
- if (ret != -EDQUOT) {
- printf("Cannot enqueue objects, or watermark not "
- "reached (ret=%d)\n", ret);
- return -1;
- }
-
- /* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->watermark;
- if (watermark != watermark_old &&
- (watermark_old != 16 || watermark != 32)) {
- printf("Bad watermark change %u -> %u\n", watermark_old,
- watermark);
- return -1;
- }
- watermark_old = watermark;
-
- /* dequeue objects from ring */
- while (i--) {
- ret = rte_ring_dequeue_bulk(r, obj_table, count);
- if (ret != 0) {
- printf("Cannot dequeue (ret=%d)\n", ret);
- return -1;
- }
- }
-
- cur_time = rte_get_timer_cycles();
- diff = end_time - cur_time;
- }
-
- if (watermark_old != 32 ) {
- printf(" watermark was not updated (wm=%u)\n",
- watermark_old);
- return -1;
- }
-
- return 0;
-}
-
-static int
-test_live_watermark_change(void)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
-
- printf("Test watermark live modification\n");
- rte_ring_set_water_mark(r, 16);
-
- /* launch a thread that will enqueue and dequeue, checking
- * watermark and quota */
- rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
-
- rte_delay_ms(100);
- rte_ring_set_water_mark(r, 32);
- rte_delay_ms(100);
-
- if (rte_eal_wait_lcore(lcore_id2) < 0)
- return -1;
-
- return 0;
-}
-
-/* Test for catch on invalid watermark values */
-static int
-test_set_watermark( void ){
- unsigned count;
- int setwm;
-
- struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
- if(r == NULL){
- printf( " ring lookup failed\n" );
- goto error;
- }
- count = r->size * 2;
- setwm = rte_ring_set_water_mark(r, count);
- if (setwm != -EINVAL){
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
-
- count = 0;
- rte_ring_set_water_mark(r, count);
- if (r->watermark != r->size) {
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
- return 0;
-
-error:
- return -1;
-}
-
/*
* helper routine for test_ring_basic
*/
@@ -418,8 +286,7 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- printf("test watermark and default bulk enqueue / dequeue\n");
- rte_ring_set_water_mark(r, 20);
+ printf("test default bulk enqueue / dequeue\n");
num_elems = 16;

cur_src = src;
@@ -433,8 +300,8 @@ test_ring_basic(void)
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != -EDQUOT) {
- printf("Watermark not exceeded\n");
+ if (ret != 0) {
+ printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
@@ -930,16 +797,6 @@ test_ring(void)
return -1;

/* basic operations */
- if (test_live_watermark_change() < 0)
- return -1;
-
- if ( test_set_watermark() < 0){
- printf ("Test failed to detect invalid parameter\n");
- return -1;
- }
- else
- printf ( "Test detected forced bad watermark values\n");
-
if ( test_create_count_odd() < 0){
printf ("Test failed to detect odd count\n");
return -1;
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index c69ca8f..4e748dc 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -118,6 +118,8 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
+ * removed the function ``rte_ring_set_water_mark`` as part of a general
+ removal of watermarks support in the library.

ABI Changes
-----------
diff --git a/examples/Makefile b/examples/Makefile
index da2bfdd..19cd5ad 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-DIRS-y += quota_watermark
+#DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 90ee63f..18fb644 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,7 +138,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->watermark = count;
r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
r->size = count;
@@ -256,24 +255,6 @@ rte_ring_free(struct rte_ring *r)
rte_free(te);
}

-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
-{
- if (count >= r->size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->size;
-
- r->watermark = count;
- return 0;
-}
-
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
@@ -287,10 +268,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->watermark == r->size)
- fprintf(f, " watermark=0\n");
- else
- fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 0f95c84..e5fc751 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -148,7 +148,6 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_ht_ptr prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
@@ -163,7 +162,6 @@ struct rte_ring {

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
@@ -269,26 +267,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);

/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -369,8 +347,6 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -385,7 +361,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
int success;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -426,13 +401,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
/*
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
@@ -441,7 +409,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -460,8 +428,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -474,7 +440,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_next, free_entries;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

prod_head = r->prod.head;
cons_tail = r->cons.tail;
@@ -503,15 +468,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -677,8 +635,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -699,8 +655,6 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -725,8 +679,6 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -751,8 +703,6 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -770,8 +720,6 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -793,8 +741,6 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
--
2.9.3
Olivier Matz
2017-03-01 10:34:57 UTC
Permalink
Post by Bruce Richardson
Remove the watermark support. A future commit will add support for having
enqueue functions return the amount of free space in the ring, which will
allow applications to implement their own watermark checks, while also
being more useful to the app.
---
app/test/commands.c | 52 ------------
app/test/test_ring.c | 149 +--------------------------------
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
6 files changed, 8 insertions(+), 278 deletions(-)
There are some other references to remove:

app/test/autotest_test_funcs.py: child.sendline("set_watermark test 100")
app/test/autotest_test_funcs.py: index = child.expect([" watermark=100",
app/test/autotest_test_funcs.py: return -1, "Fail [Bad watermark]"

doc/guides/prog_guide/ring_lib.rst:Water Marking
doc/guides/prog_guide/ring_lib.rst:The ring can have a high water mark (threshold).
doc/guides/prog_guide/ring_lib.rst:Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
Bruce Richardson
2017-03-01 10:43:43 UTC
Permalink
Post by Olivier Matz
Post by Bruce Richardson
Remove the watermark support. A future commit will add support for having
enqueue functions return the amount of free space in the ring, which will
allow applications to implement their own watermark checks, while also
being more useful to the app.
---
app/test/commands.c | 52 ------------
app/test/test_ring.c | 149 +--------------------------------
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
6 files changed, 8 insertions(+), 278 deletions(-)
app/test/autotest_test_funcs.py: child.sendline("set_watermark test 100")
app/test/autotest_test_funcs.py: index = child.expect([" watermark=100",
app/test/autotest_test_funcs.py: return -1, "Fail [Bad watermark]"
doc/guides/prog_guide/ring_lib.rst:Water Marking
doc/guides/prog_guide/ring_lib.rst:The ring can have a high water mark (threshold).
doc/guides/prog_guide/ring_lib.rst:Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
Yep, good catch. Will include in v2

/Bruce
Bruce Richardson
2017-02-23 17:24:00 UTC
Permalink
The bulk functions for rings return 0 when all elements are enqueued and a
negative value when there is no space. Change them to be consistent with the
burst functions by returning the number of elements enqueued/dequeued, i.e. 0 or N.
This change also allows the return value from enq/deq to be used directly
without a branch for error checking.
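
For illustration only (not part of this patch), a caller can now treat the
bulk return value as a count rather than an error code; the ring and buffer
names below are hypothetical application variables:

    #include <rte_ring.h>
    #include <rte_mbuf.h>

    /* sketch: enqueue a batch and drop whatever was not accepted,
     * relying on the new 0-or-N return value instead of -ENOBUFS */
    static inline void
    tx_or_drop(struct rte_ring *tx_ring, struct rte_mbuf **bufs,
               unsigned int nb_bufs)
    {
            unsigned int i;
            unsigned int sent = rte_ring_enqueue_bulk(tx_ring,
                            (void **)bufs, nb_bufs);

            /* sent is either 0 or nb_bufs, so it can be used directly */
            for (i = sent; i < nb_bufs; i++)
                    rte_pktmbuf_free(bufs[i]);
    }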

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/test-pipeline/pipeline_hash.c | 2 +-
app/test-pipeline/runtime.c | 8 +-
app/test/test_ring.c | 46 +++++----
app/test/test_ring_perf.c | 8 +-
doc/guides/rel_notes/release_17_05.rst | 11 +++
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
examples/load_balancer/runtime.c | 16 ++-
.../client_server_mp/mp_client/client.c | 8 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/qos_sched/app_thread.c | 8 +-
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 12 ++-
lib/librte_ring/rte_ring.h | 109 +++++++--------------
14 files changed, 106 insertions(+), 130 deletions(-)

diff --git a/app/test-pipeline/pipeline_hash.c b/app/test-pipeline/pipeline_hash.c
index 10d2869..1ac0aa8 100644
--- a/app/test-pipeline/pipeline_hash.c
+++ b/app/test-pipeline/pipeline_hash.c
@@ -547,6 +547,6 @@ app_main_loop_rx_metadata(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
diff --git a/app/test-pipeline/runtime.c b/app/test-pipeline/runtime.c
index 42a6142..4e20669 100644
--- a/app/test-pipeline/runtime.c
+++ b/app/test-pipeline/runtime.c
@@ -98,7 +98,7 @@ app_main_loop_rx(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -123,7 +123,7 @@ app_main_loop_worker(void) {
(void **) worker_mbuf->array,
app.burst_size_worker_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

do {
@@ -131,7 +131,7 @@ app_main_loop_worker(void) {
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
app.burst_size_worker_write);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -152,7 +152,7 @@ app_main_loop_tx(void) {
(void **) &app.mbuf_tx[i].array[n_mbufs],
app.burst_size_tx_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

n_mbufs += app.burst_size_tx_read;
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 666a451..112433b 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -117,20 +117,18 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -171,37 +169,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -217,37 +215,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -264,11 +262,11 @@ test_ring_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
}

@@ -294,25 +292,25 @@ test_ring_basic(void)

ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
}
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index 320c20c..8ccbdef 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 4e748dc..2b11765 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -120,6 +120,17 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * changed the return value of the enqueue and dequeue bulk functions to
+ match that of the burst equivalents. In all cases, ring functions which
+ operate on multiple packets now return the number of elements enqueued
+ or dequeued, as appropriate. The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``

ABI Changes
-----------
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index 9b69cfe..e3a63c8 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325..82b10bc 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (
(void **) lp->rx.mbuf_out[worker].array,
bsz);

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -349,9 +349,8 @@ app_lcore_io_tx(
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

n_mbufs += bsz_rd;

@@ -505,9 +504,8 @@ app_lcore_worker(
(void **) lp->mbuf_in.array,
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -559,7 +557,7 @@ app_lcore_worker(

#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +570,7 @@ app_lcore_worker(
}
#endif

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca3..dca9eb9 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,10 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");

for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;

- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d..19c95b2 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb..dab4594 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -180,7 +180,7 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

conf->counter = 0; /* reset empty read loop counter */
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt) == 0)
+ ; /* empty body */

conf_idx++;
if (confs[conf_idx] == NULL)
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index a6c0c70..9ec6a05 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) != 0))
+ rx_pkts) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 1a54d1b..3eb7fac 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..409b860 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -42,26 +42,30 @@ static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index e5fc751..6712f1f 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -344,14 +344,10 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -383,7 +379,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -409,7 +405,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -425,14 +421,10 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -452,7 +444,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -469,7 +461,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -490,16 +482,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/

-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -531,7 +518,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* Set the actual entries for dequeue */
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -557,7 +544,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

r->cons.tail = cons_next;

- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -575,15 +562,10 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -602,7 +584,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,

if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -618,7 +600,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -634,10 +616,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -654,10 +635,9 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -678,10 +658,9 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -708,7 +687,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -725,7 +704,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -746,10 +725,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -765,11 +741,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -786,11 +760,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -810,11 +782,9 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
if (r->cons.sc_dequeue)
@@ -841,7 +811,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -859,7 +829,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -881,10 +851,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
--
2.9.3
Bruce Richardson
2017-02-23 17:24:01 UTC
Permalink
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space in the
ring. This information can be used by applications in a number of ways.
For instance, with single-producer queues, it provides a maximum
enqueue size which is guaranteed to succeed. It can also be used to
implement watermark functionality in applications, replacing the older
built-in functionality with a more flexible version that enables apps to
implement multiple watermark thresholds rather than just one.
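
As a minimal sketch of the watermark use case (illustrative only, with a
hypothetical application-defined threshold and helper name), the returned
free-space value can drive an application-level check:

    #include <rte_ring.h>

    /* sketch: enqueue a burst and report whether the ring has risen
     * above an application-chosen watermark */
    static inline int
    enqueue_check_watermark(struct rte_ring *r, void **objs, unsigned int n,
                            unsigned int high_wm, unsigned int *enqueued)
    {
            unsigned int free_space;

            *enqueued = rte_ring_enqueue_burst(r, objs, n, &free_space);
            /* fewer free slots than the threshold means the watermark
             * has been crossed; the caller decides how to react */
            return free_space < high_wm;
    }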

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/test-pipeline/pipeline_hash.c | 3 +-
app/test-pipeline/runtime.c | 5 +-
app/test/test_link_bonding_mode4.c | 3 +-
app/test/test_pmd_ring_perf.c | 5 +-
app/test/test_ring.c | 55 ++++++-------
app/test/test_ring_perf.c | 16 ++--
app/test/test_table_ports.c | 4 +-
app/test/virtual_pmd.c | 4 +-
doc/guides/rel_notes/release_17_05.rst | 3 +
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 3 +-
examples/load_balancer/runtime.c | 12 ++-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 7 +-
examples/qos_sched/app_thread.c | 4 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 28 ++++---
lib/librte_ring/rte_ring.h | 89 +++++++++++-----------
22 files changed, 139 insertions(+), 118 deletions(-)

diff --git a/app/test-pipeline/pipeline_hash.c b/app/test-pipeline/pipeline_hash.c
index 1ac0aa8..0c6e04f 100644
--- a/app/test-pipeline/pipeline_hash.c
+++ b/app/test-pipeline/pipeline_hash.c
@@ -546,7 +546,8 @@ app_main_loop_rx_metadata(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs,
+ NULL);
} while (ret == 0);
}
}
diff --git a/app/test-pipeline/runtime.c b/app/test-pipeline/runtime.c
index 4e20669..c06ff54 100644
--- a/app/test-pipeline/runtime.c
+++ b/app/test-pipeline/runtime.c
@@ -97,7 +97,7 @@ app_main_loop_rx(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs, NULL);
} while (ret == 0);
}
}
@@ -130,7 +130,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
- app.burst_size_worker_write);
+ app.burst_size_worker_write,
+ NULL);
} while (ret == 0);
}
}
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 53caa3e..8df28b4 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -206,7 +206,8 @@ slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
static int
slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, size);
+ return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf,
+ size, NULL);
}

static uint16_t
diff --git a/app/test/test_pmd_ring_perf.c b/app/test/test_pmd_ring_perf.c
index af011f7..045a7f2 100644
--- a/app/test/test_pmd_ring_perf.c
+++ b/app/test/test_pmd_ring_perf.c
@@ -98,7 +98,7 @@ test_single_enqueue_dequeue(void)
const uint64_t sc_start = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
- rte_ring_enqueue_bulk(r, &burst, 1);
+ rte_ring_enqueue_bulk(r, &burst, 1, NULL);
rte_ring_dequeue_bulk(r, &burst, 1);
}
const uint64_t sc_end = rte_rdtsc_precise();
@@ -131,7 +131,8 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 112433b..b0ca88b 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -117,11 +117,12 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
+ NULL) != 0);
TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
@@ -167,19 +168,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -213,19 +214,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -260,7 +261,7 @@ test_ring_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -290,13 +291,13 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
@@ -371,19 +372,19 @@ test_ring_burst_basic(void)

printf("Test SP & SC basic functions \n");
printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -419,7 +420,7 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
goto fail;
@@ -427,14 +428,14 @@ test_ring_burst_basic(void)
}

printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
/* Always one free entry left */
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -444,7 +445,7 @@ test_ring_burst_basic(void)
goto fail;

printf("Test enqueue for a full entry \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;

@@ -486,19 +487,19 @@ test_ring_burst_basic(void)
printf("Test MP & MC basic functions \n");

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -534,7 +535,7 @@ test_ring_burst_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -557,19 +558,19 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK objects */
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -607,7 +608,7 @@ test_ring_burst_basic(void)

printf("Covering rte_ring_enqueue_burst functions \n");

- ret = rte_ring_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
@@ -746,7 +747,7 @@ test_ring_basic_ex(void)
}

/* Covering the ring burst operation */
- ret = rte_ring_enqueue_burst(rp, obj, 2);
+ ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
if ((ret & RTE_RING_SZ_MASK) != 2) {
printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
goto fail_test;
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index 8ccbdef..f95a8e9 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -323,14 +323,16 @@ test_burst_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
@@ -357,14 +359,16 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
diff --git a/app/test/test_table_ports.c b/app/test/test_table_ports.c
index 2532367..395f4f3 100644
--- a/app/test/test_table_ports.c
+++ b/app/test/test_table_ports.c
@@ -80,7 +80,7 @@ test_port_ring_reader(void)
mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- mbuf, 1);
+ mbuf, 1, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

if (received_pkts < expected_pkts)
@@ -93,7 +93,7 @@ test_port_ring_reader(void)
mbuf[i] = rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
RTE_PORT_IN_BURST_SIZE_MAX);

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 6e4dcd8..39e070c 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -380,7 +380,7 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
nb_pkts = 0;
else
nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increment opacket count */
dev_private->eth_stats.opackets += nb_pkts;
@@ -496,7 +496,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
vrtl_eth_dev->data->dev_private;

return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

int
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 2b11765..249ad6e 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -120,6 +120,9 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * added an extra parameter to the burst/bulk enqueue functions to
+ return the number of free spaces in the ring after enqueue. This can
+ be used by an application to implement its own watermark functionality.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 6f9cc1a..adbf478 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -102,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index e7641d2..cfd360b 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -238,7 +238,8 @@ lcore_rx(struct lcore_params *p)
continue;
}

- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs,
+ nb_ret, NULL);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 82b10bc..1645994 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,7 +144,8 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -310,7 +311,8 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -553,7 +555,8 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);

#if APP_STATS
lp->rings_out_iters[port] ++;
@@ -605,7 +608,8 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 19c95b2..c2b0261 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) == 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index d4dc789..d268350 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;

/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;

/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index dab4594..0c81a15 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) == 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -231,7 +231,7 @@ app_worker_thread(struct thread_conf **confs)
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
- (void **)mbufs, nb_pkt) == 0)
+ (void **)mbufs, nb_pkt, NULL) == 0)
; /* empty body */

conf_idx++;
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 3eb7fac..597b4c2 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 51db006..6552199 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -808,7 +808,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
/* Need to enqueue the free slots in global ring. */
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
- LCORE_CACHE_SIZE);
+ LCORE_CACHE_SIZE, NULL);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 409b860..9b8fd2b 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -43,7 +43,7 @@ common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_mp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
@@ -51,7 +51,7 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_sp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index a580a6a..d6d3e46 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -197,7 +197,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
dup_bufs[d_pkts++] = p;
}

- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
if (unlikely(ring_enq < d_pkts)) {
RTE_LOG(DEBUG, PDUMP,
"only %d of packets enqueued to ring\n", ring_enq);
diff --git a/lib/librte_port/rte_port_ras.c b/lib/librte_port/rte_port_ras.c
index c4bb508..4de0945 100644
--- a/lib/librte_port/rte_port_ras.c
+++ b/lib/librte_port/rte_port_ras.c
@@ -167,7 +167,7 @@ send_burst(struct rte_port_ring_writer_ras *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 3b9d3d0..9fadac7 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -241,7 +241,7 @@ send_burst(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -256,7 +256,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -318,11 +318,11 @@ rte_port_ring_writer_tx_bulk_internal(void *port,

RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
- n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
@@ -517,7 +517,7 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -527,7 +527,8 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_sp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -550,7 +551,7 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -560,7 +561,8 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_mp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -633,10 +635,12 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
n_pkts_ok =
- rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
n_pkts_ok =
- rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

if (n_pkts_ok >= n_pkts)
return 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 6712f1f..b5a995e 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -349,20 +349,16 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const unsigned int max = n;
int success;
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move prod.head atomically */
do {
/* Reset n to the initial burst count */
@@ -377,16 +373,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = (mask + cons_tail - prod_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : free_entries;
+
+ if (n == 0)
+ goto end;

prod_next = prod_head + n;
success = rte_atomic32_cmpset(&r->prod.head, prod_head,
@@ -405,6 +397,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -426,7 +421,8 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
@@ -442,16 +438,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = mask + cons_tail - prod_head;

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+
+ if (n == 0)
+ goto end;
+

prod_next = prod_head + n;
r->prod.head = prod_next;
@@ -461,6 +453,9 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -620,9 +615,10 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -639,9 +635,10 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -662,12 +659,12 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
}

/**
@@ -687,7 +684,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -704,7 +701,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -725,7 +722,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -971,9 +968,10 @@ struct rte_ring *rte_ring_lookup(const char *name);
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -990,9 +988,10 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -1013,12 +1012,12 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
}

/**
--
2.9.3
Bruce Richardson
2017-02-23 17:24:02 UTC
Permalink
Add an extra parameter to the ring dequeue burst/bulk functions so that
those functions can optionally return the number of objects remaining in
the ring. This information can be used by applications in a number of
ways; for instance, with single-consumer queues it provides a maximum
dequeue size which is guaranteed to succeed.
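
As an illustrative sketch (not part of this patch; the burst size and the
processing steps are made up), a single-consumer application could use the
new out-parameter like this:

#include <rte_common.h>
#include <rte_ring.h>

static void
drain_ring(struct rte_ring *r)
{
        void *objs[32];
        unsigned int avail, n;

        n = rte_ring_sc_dequeue_burst(r, objs, 32, &avail);
        /* ... process the n dequeued objects ... */

        /*
         * With a single consumer, the "avail" objects reported above are
         * guaranteed to still be in the ring, so a bulk dequeue of up to
         * that many cannot fail.
         */
        if (avail > 0)
                n = rte_ring_sc_dequeue_bulk(r, objs,
                                RTE_MIN(avail, 32u), NULL);
        /* ... process the n dequeued objects ... */
}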

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/pdump/main.c | 2 +-
app/test-pipeline/runtime.c | 6 +-
app/test/test_link_bonding_mode4.c | 3 +-
app/test/test_pmd_ring_perf.c | 7 +-
app/test/test_ring.c | 54 ++++++-------
app/test/test_ring_perf.c | 20 +++--
app/test/test_table_acl.c | 2 +-
app/test/test_table_pipeline.c | 2 +-
app/test/test_table_ports.c | 8 +-
app/test/virtual_pmd.c | 4 +-
doc/guides/rel_notes/release_17_05.rst | 8 ++
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 2 +-
examples/load_balancer/runtime.c | 6 +-
.../client_server_mp/mp_client/client.c | 3 +-
examples/packet_ordering/main.c | 6 +-
examples/qos_sched/app_thread.c | 6 +-
examples/quota_watermark/qw/main.c | 5 +-
examples/server_node_efd/node/node.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 3 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ring.c | 6 +-
lib/librte_ring/rte_ring.h | 90 +++++++++++-----------
26 files changed, 145 insertions(+), 114 deletions(-)

diff --git a/app/pdump/main.c b/app/pdump/main.c
index b88090d..3b13753 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -496,7 +496,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)

/* first dequeue packets from ring of primary process */
const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
- (void *)rxtx_bufs, BURST_SIZE);
+ (void *)rxtx_bufs, BURST_SIZE, NULL);
stats->dequeue_pkts += nb_in_deq;

if (nb_in_deq) {
diff --git a/app/test-pipeline/runtime.c b/app/test-pipeline/runtime.c
index c06ff54..8970e1c 100644
--- a/app/test-pipeline/runtime.c
+++ b/app/test-pipeline/runtime.c
@@ -121,7 +121,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_rx[i],
(void **) worker_mbuf->array,
- app.burst_size_worker_read);
+ app.burst_size_worker_read,
+ NULL);

if (ret == 0)
continue;
@@ -151,7 +152,8 @@ app_main_loop_tx(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_tx[i],
(void **) &app.mbuf_tx[i].array[n_mbufs],
- app.burst_size_tx_read);
+ app.burst_size_tx_read,
+ NULL);

if (ret == 0)
continue;
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 8df28b4..15091b1 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -193,7 +193,8 @@ static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, };
static int
slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, size);
+ return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf,
+ size, NULL);
}

/*
diff --git a/app/test/test_pmd_ring_perf.c b/app/test/test_pmd_ring_perf.c
index 045a7f2..004882a 100644
--- a/app/test/test_pmd_ring_perf.c
+++ b/app/test/test_pmd_ring_perf.c
@@ -67,7 +67,7 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t eth_start = rte_rdtsc();
@@ -99,7 +99,7 @@ test_single_enqueue_dequeue(void)
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
rte_ring_enqueue_bulk(r, &burst, 1, NULL);
- rte_ring_dequeue_bulk(r, &burst, 1);
+ rte_ring_dequeue_bulk(r, &burst, 1, NULL);
}
const uint64_t sc_end = rte_rdtsc_precise();
rte_compiler_barrier();
@@ -133,7 +133,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, (void *)burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index b0ca88b..858ebc1 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -119,7 +119,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
__func__, i, rand);
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
NULL) != 0);
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand,
+ NULL) == rand);

/* fill the ring */
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
@@ -129,7 +130,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz,
+ NULL) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -186,19 +188,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -232,19 +234,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -265,7 +267,7 @@ test_ring_basic(void)
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -303,13 +305,13 @@ test_ring_basic(void)
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue2\n");
@@ -390,19 +392,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -451,19 +453,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK entries */
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -505,19 +507,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -539,7 +541,7 @@ test_ring_burst_basic(void)
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -578,19 +580,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available objects - the exact MAX_BULK */
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -613,7 +615,7 @@ test_ring_burst_basic(void)
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret != 2)
goto fail;
@@ -753,7 +755,7 @@ test_ring_basic_ex(void)
goto fail_test;
}

- ret = rte_ring_dequeue_burst(rp, obj, 2);
+ ret = rte_ring_dequeue_burst(rp, obj, 2, NULL);
if (ret != 2) {
printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
goto fail_test;
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index f95a8e9..ed89896 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -152,12 +152,12 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t mc_end = rte_rdtsc();

printf("SC empty dequeue: %.2F\n",
@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();

@@ -325,7 +325,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -333,7 +334,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

@@ -361,7 +363,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -369,7 +372,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

diff --git a/app/test/test_table_acl.c b/app/test/test_table_acl.c
index b3bfda4..4d43be7 100644
--- a/app/test/test_table_acl.c
+++ b/app/test/test_table_acl.c
@@ -713,7 +713,7 @@ test_pipeline_single_filter(int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0) {
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/app/test/test_table_pipeline.c b/app/test/test_table_pipeline.c
index 36bfeda..b58aa5d 100644
--- a/app/test/test_table_pipeline.c
+++ b/app/test/test_table_pipeline.c
@@ -494,7 +494,7 @@ test_pipeline_single_filter(int test_type, int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0)
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/app/test/test_table_ports.c b/app/test/test_table_ports.c
index 395f4f3..39592ce 100644
--- a/app/test/test_table_ports.c
+++ b/app/test/test_table_ports.c
@@ -163,7 +163,7 @@ test_port_ring_writer(void)
rte_port_ring_writer_ops.f_flush(port);
expected_pkts = 1;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -7;
@@ -178,7 +178,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -193,7 +193,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -208,7 +208,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -9;
diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 39e070c..b209355 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -342,7 +342,7 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
dev_private = vrtl_eth_dev->data->dev_private;

rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increments ipackets count */
dev_private->eth_stats.ipackets += rx_count;
@@ -508,7 +508,7 @@ virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,

dev_private = vrtl_eth_dev->data->dev_private;
return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

static uint8_t
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 249ad6e..563a74c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -123,6 +123,8 @@ API Changes
* added an extra parameter to the burst/bulk enqueue functions to
return the number of free spaces in the ring after enqueue. This can
be used by an application to implement its own watermark functionality.
+ * added an extra parameter to the burst/bulk dequeue functions to return
+ the number of elements remaining in the ring after dequeue.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
@@ -135,6 +137,12 @@ API Changes
- ``rte_ring_sc_dequeue_bulk``
- ``rte_ring_dequeue_bulk``

+ NOTE: the above functions all have different parameters as well as
+ different return values, due to the other changes listed above. This
+ means that every existing use of these functions will be flagged by
+ the compiler. The return value usage should be checked while fixing
+ the compiler errors caused by the extra parameter.
+
ABI Changes
-----------

diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index ed5a9fc..f68ec8d 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -155,7 +155,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index f3ac9e2..96638af 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1008,7 +1008,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct port *port = &mode_8023ad_ports[slaves[i]];

slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];

for (j = 0; j < slave_slow_nb_pkts[i]; j++)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index adbf478..77ef3a1 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -88,7 +88,7 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index cfd360b..5cb6185 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -330,7 +330,7 @@ lcore_tx(struct rte_ring *in_r)

struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE);
+ (void *)bufs, BURST_SIZE, NULL);
app_stats.tx.dequeue_pkts += nb_rx;

/* if we get no traffic, flush anything we have */
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 1645994..8192c08 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -349,7 +349,8 @@ app_lcore_io_tx(
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
@@ -504,7 +505,8 @@ app_lcore_worker(
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index dca9eb9..01b535c 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -279,7 +279,8 @@ main(int argc, char *argv[])
uint16_t i, rx_pkts;
uint8_t port;

- rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index d268350..7719dad 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)

/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;

@@ -510,7 +510,7 @@ send_thread(struct send_thread_args *args)

/* deque the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(nb_dq_mbufs == 0))
continue;
@@ -595,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)

/* deque the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(dqnum == 0))
continue;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 0c81a15..15f117f 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -179,7 +179,7 @@ app_tx_thread(struct thread_conf **confs)

while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
+ burst_conf.qos_dequeue, NULL);
if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

@@ -218,7 +218,7 @@ app_worker_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
@@ -254,7 +254,7 @@ app_mixed_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 57df8ef..2dcddea 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -247,7 +247,8 @@ pipeline_stage(__attribute__((unused)) void *args)
}

/* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
if (unlikely(nb_dq_pkts < 0))
continue;

@@ -305,7 +306,7 @@ send_stage(__attribute__((unused)) void *args)

/* Dequeue packets from tx and send them */
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
- (void *) tx_pkts, *quota);
+ (void *) tx_pkts, *quota, NULL);
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);

/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 9ec6a05..f780b92 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) == 0))
+ rx_pkts, NULL) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6552199..645c0cf 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -536,7 +536,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
- cached_free_slots->objs, LCORE_CACHE_SIZE);
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE, NULL);
if (n_slots == 0)
return -ENOSPC;

diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 9b8fd2b..5c132bf 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -58,14 +58,14 @@ static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_mc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_sc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_port/rte_port_frag.c b/lib/librte_port/rte_port_frag.c
index 0fcace9..320407e 100644
--- a/lib/librte_port/rte_port_frag.c
+++ b/lib/librte_port/rte_port_frag.c
@@ -186,7 +186,8 @@ rte_port_ring_reader_frag_rx(void *port,
/* If "pkts" buffer is empty, read packet burst from ring */
if (p->n_pkts == 0) {
p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX,
+ NULL);
RTE_PORT_RING_READER_FRAG_STATS_PKTS_IN_ADD(p, p->n_pkts);
if (p->n_pkts == 0)
return n_pkts_out;
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 9fadac7..492b0e7 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -111,7 +111,8 @@ rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
@@ -124,7 +125,8 @@ rte_port_ring_multi_reader_rx(void *port, struct rte_mbuf **pkts,
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index b5a995e..afd5367 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -483,7 +483,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -492,11 +493,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
@@ -511,15 +507,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = (prod_tail - cons_head);

/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;

cons_next = cons_head + n;
success = rte_atomic32_cmpset(&r->cons.head, cons_head,
@@ -538,7 +530,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_pause();

r->cons.tail = cons_next;
-
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -562,7 +556,8 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -577,15 +572,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* and size(ring)-1. */
entries = prod_tail - cons_head;

- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;

cons_next = cons_head + n;
r->cons.head = cons_next;
@@ -595,6 +586,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -741,9 +735,11 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -760,9 +756,11 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -782,12 +780,13 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
}

/**
@@ -808,7 +807,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -826,7 +825,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -848,7 +847,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -1038,9 +1037,11 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1058,9 +1059,11 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1080,12 +1083,13 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
}

#ifdef __cplusplus
--
2.9.3
Bruce Richardson
2017-02-23 17:24:03 UTC
Permalink
Now that the enqueue functions return the amount of free space in the
ring, we can use that to replace the old watermark functionality. Update
the example app to do so, and re-enable it in the examples Makefile.
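
As an illustrative sketch (not part of this patch; the ring size, watermark
value and function name are made up), an application can reproduce the old
high-watermark check from the information returned by enqueue:

#include <errno.h>
#include <rte_ring.h>

#define APP_RING_SIZE 1024                       /* illustrative only */
#define APP_HIGH_WM   (80 * APP_RING_SIZE / 100) /* illustrative only */

/*
 * Enqueue a burst and report, via *overloaded, whether the ring fill level
 * has crossed the high watermark: the condition that the removed
 * rte_ring_set_water_mark() used to signal with -EDQUOT.
 */
static int
enqueue_with_watermark(struct rte_ring *r, void **pkts, unsigned int n,
                int *overloaded)
{
        unsigned int free_space;
        unsigned int ret;

        ret = rte_ring_enqueue_bulk(r, pkts, n, &free_space);
        *overloaded = (APP_RING_SIZE - free_space) > APP_HIGH_WM;
        return ret == 0 ? -ENOBUFS : 0;
}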

Signed-off-by: Bruce Richardson <***@intel.com>
---
examples/Makefile | 2 +-
examples/quota_watermark/qw/init.c | 5 +++--
examples/quota_watermark/qw/main.c | 16 ++++++++++------
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 ++--
examples/quota_watermark/qwctl/qwctl.c | 2 ++
examples/quota_watermark/qwctl/qwctl.h | 1 +
7 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/examples/Makefile b/examples/Makefile
index 19cd5ad..da2bfdd 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-#DIRS-y += quota_watermark
+DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 95a9f94..6babfea 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -140,7 +140,7 @@ void init_ring(int lcore_id, uint8_t port_id)
if (ring == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

- rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);
+ *high_watermark = 80 * RING_SIZE / 100;

rings[lcore_id][port_id] = ring;
}
@@ -168,10 +168,11 @@ setup_shared_variables(void)
const struct rte_memzone *qw_memzone;

qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
- 2 * sizeof(int), rte_socket_id(), 0);
+ 3 * sizeof(int), rte_socket_id(), 0);
if (qw_memzone == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 2dcddea..bdb8a43 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -67,6 +67,7 @@ struct ether_fc_frame {

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;

uint8_t port_pairs[RTE_MAX_ETHPORTS];

@@ -158,6 +159,7 @@ receive_stage(__attribute__((unused)) void *args)
uint16_t nb_rx_pkts;

unsigned int lcore_id;
+ unsigned int free;

struct rte_mbuf *pkts[MAX_PKT_QUOTA];
struct rte_ring *ring;
@@ -189,13 +191,13 @@ receive_stage(__attribute__((unused)) void *args)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
(uint16_t) *quota);
ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
- nb_rx_pkts);
- if (ret == -EDQUOT) {
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
ring_state[port_id] = RING_OVERLOADED;
send_pause_frame(port_id, 1337);
}

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
@@ -217,6 +219,7 @@ pipeline_stage(__attribute__((unused)) void *args)
uint8_t port_id;

unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;

void *pkts[MAX_PKT_QUOTA];
struct rte_ring *rx, *tx;
@@ -253,11 +256,12 @@ pipeline_stage(__attribute__((unused)) void *args)
continue;

/* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
ring_state[port_id] = RING_OVERLOADED;

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 545ba42..8c8e311 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -43,6 +43,7 @@ enum ring_state {

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

extern uint8_t port_pairs[RTE_MAX_ETHPORTS];

diff --git a/examples/quota_watermark/qwctl/commands.c b/examples/quota_watermark/qwctl/commands.c
index 036bf80..5cac0e1 100644
--- a/examples/quota_watermark/qwctl/commands.c
+++ b/examples/quota_watermark/qwctl/commands.c
@@ -140,8 +140,8 @@ cmd_set_handler(__attribute__((unused)) void *parsed_result,
else
if (tokens->value >= *low_watermark * 100 / RING_SIZE
&& tokens->value <= 100)
- rte_ring_set_water_mark(ring,
- tokens->value * RING_SIZE / 100);
+ *high_watermark = tokens->value *
+ RING_SIZE / 100;
else
cmdline_printf(cl,
"ring high watermark must be between %u%% and 100%%\n",
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index 3a85cc3..7e7a396 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -55,6 +55,7 @@

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;


static void
@@ -68,6 +69,7 @@ setup_shared_variables(void)

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}

int main(int argc, char **argv)
diff --git a/examples/quota_watermark/qwctl/qwctl.h b/examples/quota_watermark/qwctl/qwctl.h
index 8d146e5..545914b 100644
--- a/examples/quota_watermark/qwctl/qwctl.h
+++ b/examples/quota_watermark/qwctl/qwctl.h
@@ -36,5 +36,6 @@

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

#endif /* _MAIN_H_ */
--
2.9.3
Bruce Richardson
2017-02-23 17:24:04 UTC
Permalink
The local variable i is only used for loop control so define it in
the enqueue and dequeue blocks directly, rather than at the function
level.

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index afd5367..db50ce9 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -280,6 +280,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
+ unsigned int i; \
const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
@@ -306,6 +307,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
+ unsigned int i; \
uint32_t idx = cons_head & mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
@@ -356,7 +358,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned int max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move prod.head atomically */
@@ -426,7 +427,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned int i;
uint32_t mask = r->mask;

prod_head = r->prod.head;
@@ -490,7 +490,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move cons.head atomically */
@@ -561,7 +560,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-02-23 17:24:05 UTC
Permalink
We can write one common function to update the producer head for enqueue
and another to update the consumer head for dequeue, allowing a single
worker function for each of enqueue and dequeue, rather than two of each.
Update all the other inline functions to use the new functions.
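
To illustrate the intended shape (a sketch, not the literal patch contents),
each public bulk/burst enqueue variant then reduces to a thin wrapper around
the single worker, selecting the queue behavior and the producer mode:

static inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                unsigned int n, unsigned int *free_space)
{
        /* fixed-size behavior, multi-producer head update */
        return __rte_ring_do_enqueue(r, obj_table, n,
                        RTE_RING_QUEUE_FIXED, __IS_MP, free_space);
}

static inline unsigned int
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                unsigned int n, unsigned int *free_space)
{
        /* variable-size behavior, single-producer head update */
        return __rte_ring_do_enqueue(r, obj_table, n,
                        RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}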

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 18fb644..4776079 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,8 +138,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
- r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
+ r->prod.sp_enqueue = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
+ r->cons.sc_dequeue = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
r->size = count;
r->mask = count - 1;
r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index db50ce9..d10b7d1 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -164,6 +164,12 @@ struct rte_ring {
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

+/* @internal defines for passing to the enqueue/dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
+
/**
* Calculate the memory size needed for a ring
*
@@ -282,7 +288,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
#define ENQUEUE_PTRS() do { \
unsigned int i; \
const uint32_t size = r->size; \
- uint32_t idx = prod_head & mask; \
+ uint32_t idx = prod_head & r->mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
r->ring[idx] = obj_table[i]; \
@@ -308,7 +314,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
unsigned int i; \
- uint32_t idx = cons_head & mask; \
+ uint32_t idx = cons_head & r->mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
@@ -331,83 +337,72 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} while (0)

/**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
* @param n
- * The number of objects to add in the ring from the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned int max = n;
+ const uint32_t mask = r->mask;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

- /* move prod.head atomically */
do {
/* Reset n to the initial burst count */
n = max;

- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
+ *old_head = r->prod.head;
+ const uint32_t cons_tail = r->cons.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
+ * *old_head > cons_tail). So 'free_entries' is always between 0
* and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
+ *free_entries = (mask + cons_tail - *old_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries))
+ if (unlikely(n > *free_entries))
n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : free_entries;
+ 0 : *free_entries;

if (n == 0)
- goto end;
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
} while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-end:
- if (free_space != NULL)
- *free_space = free_entries - n;
return n;
}

/**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
*
- * @param r
+ * @param r
* A pointer to the ring structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
@@ -415,44 +410,40 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;

+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;

-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
r->prod.tail = prod_next;
+
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -460,130 +451,112 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
}

/**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of elements we will want to dequeue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
n = max;

- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
+ *old_head = r->cons.head;
+ const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- entries = (prod_tail - cons_head);
+ *entries = (prod_tail - *old_head);

/* Set the actual entries for dequeue */
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

if (unlikely(n == 0))
- goto end;
-
- cons_next = cons_head + n;
- success = rte_atomic32_cmpset(&r->cons.head, cons_head,
- cons_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
} while (unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
- rte_smp_rmb();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
-end:
- if (available != NULL)
- *available = entries - n;
return n;
}

/**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
*
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of void * pointers (objects).
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of objects to pull from the ring.
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+ int is_mp, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- uint32_t mask = r->mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
-
- if (unlikely(entries == 0))
- goto end;
+ uint32_t cons_head, cons_next;
+ uint32_t entries;

- cons_next = cons_head + n;
- r->cons.head = cons_next;
+ n = __rte_ring_move_cons_head(r, is_mp, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;

- /* copy in table */
DEQUEUE_PTRS();
rte_smp_rmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->cons.tail != cons_head))
+ rte_pause();
+
r->cons.tail = cons_next;
+
end:
if (available != NULL)
*available = entries - n;
@@ -609,8 +582,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MP, free_space);
}

/**
@@ -629,8 +602,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SP, free_space);
}

/**
@@ -653,10 +626,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->prod.sp_enqueue, free_space);
}

/**
@@ -736,8 +707,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MC, available);
}

/**
@@ -757,8 +728,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SC, available);
}

/**
@@ -781,10 +752,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->cons.sc_dequeue, available);
}

/**
@@ -967,8 +936,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}

/**
@@ -987,8 +956,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}

/**
@@ -1011,10 +980,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+ r->prod.sp_enqueue, free_space);
}

/**
@@ -1038,8 +1005,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}

/**
@@ -1060,8 +1027,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}

/**
@@ -1084,10 +1051,9 @@ static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE,
+ r->cons.sc_dequeue, available);
}

#ifdef __cplusplus
--
2.9.3
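
As a worked example of the "subtraction between two unsigned 32-bit values"
comment in __rte_ring_move_prod_head above, here is a small standalone
program (an illustration only, not DPDK code) showing that
mask + cons_tail - prod_head stays correct even after the indices wrap past
UINT32_MAX:

#include <stdio.h>
#include <stdint.h>

/* free slots = mask + cons_tail - prod_head, computed modulo 2^32 */
static uint32_t
ring_free_entries(uint32_t mask, uint32_t cons_tail, uint32_t prod_head)
{
	return mask + cons_tail - prod_head;
}

int main(void)
{
	const uint32_t mask = 15;	/* ring of size 16 */

	/* ordinary case: head = 10, tail = 5, so 5 slots in use, 10 free */
	printf("%u\n", ring_free_entries(mask, 5, 10));		/* prints 10 */

	/* head has wrapped past UINT32_MAX, tail has not: 9 in use, 6 free */
	printf("%u\n", ring_free_entries(mask, 0xFFFFFFFAu, 3));	/* prints 6 */

	return 0;
}
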
Olivier MATZ
2017-03-08 10:49:06 UTC
Permalink
Post by Bruce Richardson
We can write a single common function for head manipulation for enq
and a common one for deq, allowing us to have a single worker function
for enq and deq, rather than two of each. Update all other inline
functions to use the new functions.
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)
[...]
Post by Bruce Richardson
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;
-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();
+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
I'd say this part should not be done in case is_sp == 1.
Since it is sometimes a constant arg in an inline func, it may be better
to add the if (is_sp == 0).

[...]
Post by Bruce Richardson
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+ int is_mp, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- uint32_t mask = r->mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
-
- if (unlikely(entries == 0))
- goto end;
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
- cons_next = cons_head + n;
- r->cons.head = cons_next;
+ n = __rte_ring_move_cons_head(r, is_mp, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
- /* copy in table */
DEQUEUE_PTRS();
rte_smp_rmb();
+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->cons.tail != cons_head))
+ rte_pause();
+
r->cons.tail = cons_next;
Same here.
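
For reference, a minimal self-contained sketch of the variant being suggested
here (this is not the submitted patch; the stand-in names and macros below
are invented for illustration):

#include <stdint.h>

#define unlikely(x) __builtin_expect(!!(x), 0)	/* stand-in for DPDK's macro */
static inline void cpu_pause(void) { }		/* stand-in for rte_pause() */

struct headtail {
	volatile uint32_t head;
	volatile uint32_t tail;
};

/*
 * Publish the new tail. Only the multi-producer path needs to wait for
 * earlier in-flight enqueues; with is_sp a compile-time constant at the
 * inlined call sites, the branch is removed by constant propagation.
 */
static inline void
publish_tail(struct headtail *ht, uint32_t old_val, uint32_t new_val, int is_sp)
{
	if (is_sp == 0) {
		while (unlikely(ht->tail != old_val))
			cpu_pause();
	}
	ht->tail = new_val;
}
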
Bruce Richardson
2017-03-08 12:06:54 UTC
Permalink
Post by Olivier MATZ
Post by Bruce Richardson
We can write a single common function for head manipulation for enq
and a common one for deq, allowing us to have a single worker function
for enq and deq, rather than two of each. Update all other inline
functions to use the new functions.
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)
[...]
Post by Bruce Richardson
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;
-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();
+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
I'd say this part should not be done in case is_sp == 1.
Since it is sometimes a constant arg in an inline func, it may be better
to add the if (is_sp == 0).
[...]
Yes, it's an unnecessary check. However, having it in place for the sp
case made no performance difference in my test, so I decided to keep
the code shorter by avoiding an additional branch. If there is a
performance hit I'll remove it, but I would rather not add more branches
to the code in the absence of a real impact from not having them.

Regards,
/Bruce
Olivier Matz
2017-03-14 08:56:31 UTC
Permalink
Post by Bruce Richardson
Post by Olivier MATZ
Post by Bruce Richardson
We can write a single common function for head manipulation for enq
and a common one for deq, allowing us to have a single worker function
for enq and deq, rather than two of each. Update all other inline
functions to use the new functions.
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)
[...]
Post by Bruce Richardson
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;
-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();
+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
I'd say this part should not be done in case is_sp == 1.
Since it is sometimes a constant arg in an inline func, it may be better
to add the if (is_sp == 0).
[...]
Yes, it's an unnecessary check. However, having it in place for the sp
case made no performance difference in my test, so I decided to keep
the code shorter by avoiding an additional branch. If there is a
performance hit I'll remove it, but I would rather not add more branches
to the code in the absense of a real impact to not having them.
Ok.
Maybe it's worth checking the numbers given by the unit test.

Olivier
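
The numbers in question come from timing loops much like the sketch below
(an illustration only, assuming the reworked bulk APIs from this series and
a ring already created with rte_ring_create; this is not the actual
app/test/test_ring_perf.c code):

#include <stdio.h>
#include <stdint.h>
#include <rte_ring.h>
#include <rte_cycles.h>

/* report average cycles per object for the SP/SC bulk path;
 * bulk_size must be <= 64 for the local burst array below */
static void
measure_spsc(struct rte_ring *r, unsigned int bulk_size)
{
	void *burst[64] = { NULL };
	const uint64_t iterations = 1 << 20;
	const uint64_t start = rte_rdtsc();

	for (uint64_t i = 0; i < iterations; i++) {
		rte_ring_sp_enqueue_bulk(r, burst, bulk_size, NULL);
		rte_ring_sc_dequeue_bulk(r, burst, bulk_size, NULL);
	}

	const uint64_t cycles = rte_rdtsc() - start;
	printf("SP/SC bulk enq/dequeue (size: %u): %.2f\n", bulk_size,
	       (double)cycles / ((double)iterations * bulk_size));
}
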
Bruce Richardson
2017-02-23 17:24:06 UTC
Permalink
Both producer and consumer use the same logic for updating the tail
index, so merge it into a single function.

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index d10b7d1..6b901b1 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -336,6 +336,19 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)

+static inline __attribute__((always_inline)) void
+update_tail(struct rte_ring_ht_ptr *ht_ptr, uint32_t old_val, uint32_t new_val)
+{
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(ht_ptr->tail != old_val))
+ rte_pause();
+
+ ht_ptr->tail = new_val;
+}
+
/**
* @internal This function updates the producer head for enqueue
*
@@ -435,15 +448,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-
+ update_tail(&r->prod, prod_head, prod_next);
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -548,14 +553,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
+ update_tail(&r->cons, cons_head, cons_next);

end:
if (available != NULL)
--
2.9.3
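
A tiny standalone illustration of how the merged helper is meant to be
called (stand-in types and names, not the real DPDK ones): old_val is the
head value this thread reserved from, new_val is where its reservation ends;
a producer that finishes copying early simply spins until all earlier
reservations have been published.

#include <stdio.h>
#include <stdint.h>

#define unlikely(x) __builtin_expect(!!(x), 0)	/* stand-in */
static inline void cpu_pause(void) { }		/* stand-in for rte_pause() */

/* stand-in for the common producer/consumer head-tail structure */
struct headtail {
	volatile uint32_t head;
	volatile uint32_t tail;
};

static inline void
update_tail(struct headtail *ht, uint32_t old_val, uint32_t new_val)
{
	/* wait until every earlier reservation has been published */
	while (unlikely(ht->tail != old_val))
		cpu_pause();
	ht->tail = new_val;
}

int main(void)
{
	struct headtail prod = { .head = 7, .tail = 0 };

	/*
	 * Producer A reserved slots [0, 4), producer B reserved [4, 7).
	 * If B finished copying first and called update_tail(&prod, 4, 7)
	 * at this point, it would spin: tail is still 0, not 4.
	 */
	update_tail(&prod, 0, 4);	/* A publishes its slots */
	update_tail(&prod, 4, 7);	/* B's wait condition is now already met */
	printf("prod.tail = %u\n", prod.tail);	/* prints 7 */
	return 0;
}
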
Bruce Richardson
2017-02-23 17:24:07 UTC
Permalink
Modify the enqueue and dequeue macros to support copying any type of
object by passing in the exact object type. Rather than using the "ring"
structure member of rte_ring, which is of type "array of void *", have
the macros take the start of the ring as a pointer value, thereby
leaving the rte_ring structure as purely a header. This allows it to be
reused by other future ring types, which can add extra fields if they
want, or even have the actual ring elements, of whatever type, stored
separately from the ring header.

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 68 ++++++++++++++++++++++++----------------------
1 file changed, 36 insertions(+), 32 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 6b901b1..1b04db1 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -154,11 +154,7 @@ struct rte_ring {

/** Ring consumer status. */
struct rte_ring_ht_ptr cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
-
- void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
- * not volatile so need to be careful
- * about compiler re-ordering */
-};
+} __rte_cache_aligned;

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
@@ -285,54 +281,62 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
-#define ENQUEUE_PTRS() do { \
+#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
unsigned int i; \
- const uint32_t size = r->size; \
- uint32_t idx = prod_head & r->mask; \
+ const uint32_t size = (r)->size; \
+ uint32_t idx = prod_head & (r)->mask; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx+1] = obj_table[i+1]; \
- r->ring[idx+2] = obj_table[i+2]; \
- r->ring[idx+3] = obj_table[i+3]; \
+ ring[idx] = obj_table[i]; \
+ ring[idx+1] = obj_table[i+1]; \
+ ring[idx+2] = obj_table[i+2]; \
+ ring[idx+3] = obj_table[i+3]; \
} \
switch (n & 0x3) { \
- case 3: r->ring[idx++] = obj_table[i++]; \
- case 2: r->ring[idx++] = obj_table[i++]; \
- case 1: r->ring[idx++] = obj_table[i++]; \
+ case 3: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ ring[idx++] = obj_table[i++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
} \
-} while(0)
+} while (0)

/* the actual copy of pointers on the ring to obj_table.
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
-#define DEQUEUE_PTRS() do { \
+#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
unsigned int i; \
- uint32_t idx = cons_head & r->mask; \
- const uint32_t size = r->size; \
+ uint32_t idx = cons_head & (r)->mask; \
+ const uint32_t size = (r)->size; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i+1] = r->ring[idx+1]; \
- obj_table[i+2] = r->ring[idx+2]; \
- obj_table[i+3] = r->ring[idx+3]; \
+ obj_table[i] = ring[idx]; \
+ obj_table[i+1] = ring[idx+1]; \
+ obj_table[i+2] = ring[idx+2]; \
+ obj_table[i+3] = ring[idx+3]; \
} \
switch (n & 0x3) { \
- case 3: obj_table[i++] = r->ring[idx++]; \
- case 2: obj_table[i++] = r->ring[idx++]; \
- case 1: obj_table[i++] = r->ring[idx++]; \
+ case 3: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = ring[idx++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
} \
} while (0)

@@ -445,7 +449,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
if (n == 0)
goto end;

- ENQUEUE_PTRS();
+ ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
rte_smp_wmb();

update_tail(&r->prod, prod_head, prod_next);
@@ -550,7 +554,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
if (n == 0)
goto end;

- DEQUEUE_PTRS();
+ DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
rte_smp_rmb();

update_tail(&r->cons, cons_head, cons_next);
--
2.9.3
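
A small standalone illustration of the layout idea behind passing "&r[1]"
(the names here are invented, not DPDK code): the element array starts
immediately after the header, so the header stays element-type agnostic and
the element type is supplied at the point where the copy macro is expanded.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct hdr {		/* stand-in for the ring header */
	uint32_t size;
	uint32_t mask;
};

int main(void)
{
	const uint32_t size = 8;
	/* allocate the header plus a payload of 'size' uint16_t elements */
	struct hdr *r = malloc(sizeof(*r) + size * sizeof(uint16_t));
	if (r == NULL)
		return 1;
	r->size = size;
	r->mask = size - 1;

	uint16_t *ring = (void *)&r[1];	/* elements start right after the header */
	for (uint32_t i = 0; i < size; i++)
		ring[i] = (uint16_t)(i * 10);

	printf("element 3 = %u\n", ring[3]);	/* prints 30 */
	free(r);
	return 0;
}
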
Jerin Jacob
2017-02-28 11:35:13 UTC
Permalink
Post by Bruce Richardson
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved
performance use 128B rather than 64B alignment since it stops the producer
and consumer data being on adjacent cachelines.
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
I think we need to use RTE_CACHE_LINE_MIN_SIZE instead of
RTE_CACHE_LINE_SIZE for alignment here. PPC and ThunderX1 targets have a
cache line size of 128B.
Post by Bruce Richardson
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
/** Ring consumer status. */
struct cons {
@@ -177,11 +177,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Consumer head. */
volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
+ } cons __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
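
To make the question concrete, a quick standalone check (invented struct and
macro names, not the DPDK ones) of what __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
does to the layout on a 64B-cache-line target; on a 128B-line target such as
the ones mentioned above, the same expression would pad each block out to
256B.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGN_2X 128	/* stands in for RTE_CACHE_LINE_SIZE * 2 on a 64B target */

struct headtail {
	uint32_t size;
	uint32_t mask;
	volatile uint32_t head;
	volatile uint32_t tail;
};

struct ring_hdr {
	char name[32];
	int flags;
	struct headtail prod __attribute__((aligned(ALIGN_2X)));
	struct headtail cons __attribute__((aligned(ALIGN_2X)));
};

int main(void)
{
	printf("prod offset: %zu\n", offsetof(struct ring_hdr, prod));	/* 128 */
	printf("cons offset: %zu\n", offsetof(struct ring_hdr, cons));	/* 256 */
	printf("padding after prod: %zu bytes\n",
	       offsetof(struct ring_hdr, cons) -
	       offsetof(struct ring_hdr, prod) - sizeof(struct headtail));	/* 112 */
	return 0;
}
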
Bruce Richardson
2017-02-28 11:57:03 UTC
Permalink
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved
performance use 128B rather than 64B alignment since it stops the producer
and consumer data being on adjacent cachelines.
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
I think we need to use RTE_CACHE_LINE_MIN_SIZE instead of
RTE_CACHE_LINE_SIZE for alignment here. PPC and ThunderX1 targets are cache line
size of 128B
Sure.

However, can you perhaps try a performance test and check to see if
there is a performance difference between the two values before I change
it? In my tests I see improved performance by having an extra blank
cache-line between the producer and consumer data.

/Bruce
Jerin Jacob
2017-02-28 12:08:34 UTC
Permalink
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved
performance use 128B rather than 64B alignment since it stops the producer
and consumer data being on adjacent cachelines.
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
I think we need to use RTE_CACHE_LINE_MIN_SIZE instead of
RTE_CACHE_LINE_SIZE for alignment here. PPC and ThunderX1 targets are cache line
size of 128B
Sure.
However, can you perhaps try a performance test and check to see if
there is a performance difference between the two values before I change
it? In my tests I see improved performance by having an extra blank
cache-line between the producer and consumer data.
Sure. Which test are you running to measure the performance difference?
Is it app/test/test_ring_perf.c?
Post by Bruce Richardson
/Bruce
Bruce Richardson
2017-02-28 13:52:26 UTC
Permalink
Post by Jerin Jacob
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved
performance use 128B rather than 64B alignment since it stops the producer
and consumer data being on adjacent cachelines.
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
I think we need to use RTE_CACHE_LINE_MIN_SIZE instead of
RTE_CACHE_LINE_SIZE for alignment here. PPC and ThunderX1 targets are cache line
size of 128B
Sure.
However, can you perhaps try a performance test and check to see if
there is a performance difference between the two values before I change
it? In my tests I see improved performance by having an extra blank
cache-line between the producer and consumer data.
Sure. Which test are you running to measure the performance difference?
Is it app/test/test_ring_perf.c?
Yep, just the basic ring perf test. I look mostly at the core-to-core
numbers, since hyperthread-to-hyperthread or NUMA socket to NUMA socket
would be far less common use cases IMHO.

/Bruce
Jerin Jacob
2017-02-28 17:54:25 UTC
Permalink
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved
performance use 128B rather than 64B alignment since it stops the producer
and consumer data being on adjacent cachelines.
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
I think we need to use RTE_CACHE_LINE_MIN_SIZE instead of
RTE_CACHE_LINE_SIZE for alignment here. PPC and ThunderX1 targets are cache line
size of 128B
Sure.
However, can you perhaps try a performance test and check to see if
there is a performance difference between the two values before I change
it? In my tests I see improved performance by having an extra blank
cache-line between the producer and consumer data.
Sure. Which test are you running to measure the performance difference?
Is it app/test/test_ring_perf.c?
Yep, just the basic ring perf test. I look mostly at the core-to-core
numbers, since hyperthread-to-hyperthread or NUMA socket to NUMA socket
would be far less common use cases IMHO.
Performance test results show a regression with the RTE_CACHE_LINE_MIN_SIZE
scheme in some use cases, and higher performance in others (testing using
two physical cores).


# base code
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 46
SP/SC burst enq/dequeue (size: 32): 12
MP/MC burst enq/dequeue (size: 32): 18

### Testing empty dequeue ###
SC empty dequeue: 7.11
MC empty dequeue: 12.15

### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 19.08
MP/MC bulk enq/dequeue (size: 8): 46.28
SP/SC bulk enq/dequeue (size: 32): 11.89
MP/MC bulk enq/dequeue (size: 32): 18.84

### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.42
MP/MC bulk enq/dequeue (size: 8): 73.32
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK

# with ring rework patch
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 19
MP/MC burst enq/dequeue (size: 8): 45
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18

### Testing empty dequeue ###
SC empty dequeue: 7.10
MC empty dequeue: 12.15

### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 18.59
MP/MC bulk enq/dequeue (size: 8): 45.49
SP/SC bulk enq/dequeue (size: 32): 11.67
MP/MC bulk enq/dequeue (size: 32): 18.65

### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.41
MP/MC bulk enq/dequeue (size: 8): 72.98
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
RTE>>

# with ring rework patch + cache-line size change to one on 128BCL target
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 90
MP/MC single enq/dequeue: 317
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 48
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18

### Testing empty dequeue ###
SC empty dequeue: 8.10
MC empty dequeue: 11.15

### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 20.24
MP/MC bulk enq/dequeue (size: 8): 48.43
SP/SC bulk enq/dequeue (size: 32): 11.01
MP/MC bulk enq/dequeue (size: 32): 18.43

### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 25.92
MP/MC bulk enq/dequeue (size: 8): 69.76
SP/SC bulk enq/dequeue (size: 32): 14.27
MP/MC bulk enq/dequeue (size: 32): 22.94
Test OK
RTE>>
Bruce Richardson
2017-03-01 09:47:03 UTC
Permalink
Post by Jerin Jacob
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. For improved
performance use 128B rather than 64B alignment since it stops the producer
and consumer data being on adjacent cachelines.
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 8 ++------
4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================
+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..04fe667 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -168,7 +168,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(RTE_CACHE_LINE_SIZE * 2);
I think we need to use RTE_CACHE_LINE_MIN_SIZE instead of
RTE_CACHE_LINE_SIZE for alignment here. PPC and ThunderX1 targets are cache line
size of 128B
Sure.
However, can you perhaps try a performance test and check to see if
there is a performance difference between the two values before I change
it? In my tests I see improved performance by having an extra blank
cache-line between the producer and consumer data.
Sure. Which test are you running to measure the performance difference?
Is it app/test/test_ring_perf.c?
Yep, just the basic ring perf test. I look mostly at the core-to-core
numbers, since hyperthread-to-hyperthread or NUMA socket to NUMA socket
would be far less common use cases IMHO.
Performance test result shows regression with RTE_CACHE_LINE_MIN_SIZE
scheme in some use case and some use case has higher performance(Testing using
two physical cores)
# base code
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 46
SP/SC burst enq/dequeue (size: 32): 12
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 7.11
MC empty dequeue: 12.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 19.08
MP/MC bulk enq/dequeue (size: 8): 46.28
SP/SC bulk enq/dequeue (size: 32): 11.89
MP/MC bulk enq/dequeue (size: 32): 18.84
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.42
MP/MC bulk enq/dequeue (size: 8): 73.32
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
# with ring rework patch
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 19
MP/MC burst enq/dequeue (size: 8): 45
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 7.10
MC empty dequeue: 12.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 18.59
MP/MC bulk enq/dequeue (size: 8): 45.49
SP/SC bulk enq/dequeue (size: 32): 11.67
MP/MC bulk enq/dequeue (size: 32): 18.65
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.41
MP/MC bulk enq/dequeue (size: 8): 72.98
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
RTE>>
# with ring rework patch + cache-line size change to one on 128BCL target
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 90
MP/MC single enq/dequeue: 317
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 48
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 8.10
MC empty dequeue: 11.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 20.24
MP/MC bulk enq/dequeue (size: 8): 48.43
SP/SC bulk enq/dequeue (size: 32): 11.01
MP/MC bulk enq/dequeue (size: 32): 18.43
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 25.92
MP/MC bulk enq/dequeue (size: 8): 69.76
SP/SC bulk enq/dequeue (size: 32): 14.27
MP/MC bulk enq/dequeue (size: 32): 22.94
Test OK
RTE>>
So given that there is not much difference here, is MIN_SIZE, i.e.
forced 64B, your preference rather than the actual cacheline size?

/Bruce
Olivier Matz
2017-03-01 10:17:53 UTC
Permalink
Hi Bruce,

On Wed, 1 Mar 2017 09:47:03 +0000, Bruce Richardson
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care
about the arrangement of cachelines in the rte_ring
structure. Therefore just remove the build option and set
the structures to be always split. For improved
performance use 128B rather than 64B alignment since it
stops the producer and consumer data being on adjacent
You say you see an improved performance on Intel by having an extra
blank cache-line between the producer and consumer data. Do you have an
idea why it behaves like this? Do you think it is related to the
hardware adjacent cache line prefetcher?
Post by Bruce Richardson
[...]
Post by Jerin Jacob
# base code
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 46
SP/SC burst enq/dequeue (size: 32): 12
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 7.11
MC empty dequeue: 12.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 19.08
MP/MC bulk enq/dequeue (size: 8): 46.28
SP/SC bulk enq/dequeue (size: 32): 11.89
MP/MC bulk enq/dequeue (size: 32): 18.84
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.42
MP/MC bulk enq/dequeue (size: 8): 73.32
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
# with ring rework patch
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 19
MP/MC burst enq/dequeue (size: 8): 45
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 7.10
MC empty dequeue: 12.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 18.59
MP/MC bulk enq/dequeue (size: 8): 45.49
SP/SC bulk enq/dequeue (size: 32): 11.67
MP/MC bulk enq/dequeue (size: 32): 18.65
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.41
MP/MC bulk enq/dequeue (size: 8): 72.98
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
RTE>>
# with ring rework patch + cache-line size change to one on 128BCL target
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 90
MP/MC single enq/dequeue: 317
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 48
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 8.10
MC empty dequeue: 11.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 20.24
MP/MC bulk enq/dequeue (size: 8): 48.43
SP/SC bulk enq/dequeue (size: 32): 11.01
MP/MC bulk enq/dequeue (size: 32): 18.43
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 25.92
MP/MC bulk enq/dequeue (size: 8): 69.76
SP/SC bulk enq/dequeue (size: 32): 14.27
MP/MC bulk enq/dequeue (size: 32): 22.94
Test OK
RTE>>
So given that there is not much difference here, is the MIN_SIZE i.e.
forced 64B, your preference, rather than actual cacheline-size?
I don't quite like this macro CACHE_LINE_MIN_SIZE. For me, it does not
mean anything. The reasons for aligning on a cache line size are
straightforward, but when should we need to align on the minimum
cache line size supported by dpdk? For instance, in mbuf structure,
aligning on 64 would make more sense to me.

So, I would prefer using (RTE_CACHE_LINE_SIZE * 2) here. If we don't
want it on some architectures, or if this optimization is only for Intel
(or for all archs that need this optimization), I think we could have
something like:

/* bla bla */
#ifdef INTEL
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
#else
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
#endif


Olivier
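
For what it's worth, one concrete shape that sketch could take (still only a
sketch: the architecture test below is an assumption for illustration, not
something proposed in this series):

/* widen the alignment only on targets where the spare cache line between
 * producer and consumer has been shown to help */
#if defined(RTE_ARCH_X86_64)
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
#else
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
#endif

struct rte_ring {
	/* ... name, flags and other header fields ... */
	struct rte_ring_ht_ptr prod __rte_ring_aligned;
	struct rte_ring_ht_ptr cons __rte_ring_aligned;
} __rte_cache_aligned;
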
Bruce Richardson
2017-03-01 10:42:58 UTC
Permalink
Post by Olivier Matz
Hi Bruce,
On Wed, 1 Mar 2017 09:47:03 +0000, Bruce Richardson
Post by Bruce Richardson
Post by Jerin Jacob
Post by Bruce Richardson
Users compiling DPDK should not need to know or care
about the arrangement of cachelines in the rte_ring
structure. Therefore just remove the build option and set
the structures to be always split. For improved
performance use 128B rather than 64B alignment since it
stops the producer and consumer data being on adjacent
You say you see an improved performance on Intel by having an extra
blank cache-line between the producer and consumer data. Do you have an
idea why it behaves like this? Do you think it is related to the
hardware adjacent cache line prefetcher?
That is a likely candidate, but I haven't done a full analysis on the
details to know for sure what all the factors are. We see a similar
effect with the packet distributor library, which uses similar padding.
Post by Olivier Matz
Post by Bruce Richardson
[...]
Post by Jerin Jacob
# base code
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 46
SP/SC burst enq/dequeue (size: 32): 12
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 7.11
MC empty dequeue: 12.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 19.08
MP/MC bulk enq/dequeue (size: 8): 46.28
SP/SC bulk enq/dequeue (size: 32): 11.89
MP/MC bulk enq/dequeue (size: 32): 18.84
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.42
MP/MC bulk enq/dequeue (size: 8): 73.32
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
# with ring rework patch
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 84
MP/MC single enq/dequeue: 301
SP/SC burst enq/dequeue (size: 8): 19
MP/MC burst enq/dequeue (size: 8): 45
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 7.10
MC empty dequeue: 12.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 18.59
MP/MC bulk enq/dequeue (size: 8): 45.49
SP/SC bulk enq/dequeue (size: 32): 11.67
MP/MC bulk enq/dequeue (size: 32): 18.65
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 37.41
MP/MC bulk enq/dequeue (size: 8): 72.98
SP/SC bulk enq/dequeue (size: 32): 18.69
MP/MC bulk enq/dequeue (size: 32): 24.59
Test OK
RTE>>
# with ring rework patch + cache-line size change to one on 128BCL target
RTE>>ring_perf_autotest
### Testing single element and burst enq/deq ###
SP/SC single enq/dequeue: 90
MP/MC single enq/dequeue: 317
SP/SC burst enq/dequeue (size: 8): 20
MP/MC burst enq/dequeue (size: 8): 48
SP/SC burst enq/dequeue (size: 32): 11
MP/MC burst enq/dequeue (size: 32): 18
### Testing empty dequeue ###
SC empty dequeue: 8.10
MC empty dequeue: 11.15
### Testing using a single lcore ###
SP/SC bulk enq/dequeue (size: 8): 20.24
MP/MC bulk enq/dequeue (size: 8): 48.43
SP/SC bulk enq/dequeue (size: 32): 11.01
MP/MC bulk enq/dequeue (size: 32): 18.43
### Testing using two physical cores ###
SP/SC bulk enq/dequeue (size: 8): 25.92
MP/MC bulk enq/dequeue (size: 8): 69.76
SP/SC bulk enq/dequeue (size: 32): 14.27
MP/MC bulk enq/dequeue (size: 32): 22.94
Test OK
RTE>>
So given that there is not much difference here, is the MIN_SIZE i.e.
forced 64B, your preference, rather than actual cacheline-size?
I don't quite like this macro CACHE_LINE_MIN_SIZE. For me, it does not
mean anything. The reasons for aligning on a cache line size are
straightforward, but when should we need to align on the minimum
cache line size supported by dpdk? For instance, in mbuf structure,
aligning on 64 would make more sense to me.
So, I would prefer using (RTE_CACHE_LINE_SIZE * 2) here. If we don't
want it on some architectures, or if this optimization is only for Intel
(or all archs that need this optim), I think we could have something
/* bla bla */
#ifdef INTEL
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
#else
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
#endif
I would agree that CACHE_LINE_MIN_SIZE probably doesn't make any sense
here, but I'm happy to put in any suitable scheme that others are happy
with. The options are:

* Keep as-is:
adv: simplest option, disadv: wastes 128B * 2 on some platforms
* Change to MIN_SIZE:
adv: no ifdefs, disadv: doesn't make much sense logically here
* Use ifdefs:
adv: each platform gets what's best for it, disadv: untidy code, may
be harder to maintain
* Use hard-coded 128:
adv: short and simple, disadv: misses any logical reason why 128 is
used, i.e. magic number

I'm ok with any option above.

/Bruce
Olivier Matz
2017-03-01 11:06:33 UTC
Permalink
Post by Bruce Richardson
Post by Olivier Matz
On Wed, 1 Mar 2017 09:47:03 +0000, Bruce Richardson
Post by Bruce Richardson
So given that there is not much difference here, is the MIN_SIZE i.e.
forced 64B, your preference, rather than actual cacheline-size?
[...]
Post by Olivier Matz
I don't quite like this macro CACHE_LINE_MIN_SIZE. For me, it does not
mean anything. The reasons for aligning on a cache line size are
straightforward, but when should we need to align on the minimum
cache line size supported by dpdk? For instance, in mbuf structure,
aligning on 64 would make more sense to me.
So, I would prefer using (RTE_CACHE_LINE_SIZE * 2) here. If we don't
want it on some architectures, or if this optimization is only for Intel
(or all archs that need this optim), I think we could have something
/* bla bla */
#ifdef INTEL
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
#else
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
#endif
I would agree that CACHE_LINE_MIN_SIZE probably doesn't make any sense
here, but I'm happy to put in any suitable scheme that others are happy
adv: simplest option, disadv: wastes 128B * 2 on some platforms
adv: no ifdefs, disadv: doesn't make much sense logically here
adv: each platform gets what's best for it, disadv: untidy code, may
be harder to maintain
adv: short and simple, disadv: misses any logical reason why 128 is
used, i.e. magic number
I'm ok with any option above.
I'd vote for "Keep as-is" or "Use ifdefs".

Olivier
Jerin Jacob
2017-03-01 11:19:56 UTC
Permalink
Post by Olivier Matz
Post by Bruce Richardson
Post by Olivier Matz
On Wed, 1 Mar 2017 09:47:03 +0000, Bruce Richardson
Post by Bruce Richardson
So given that there is not much difference here, is the MIN_SIZE i.e.
forced 64B, your preference, rather than actual cacheline-size?
[...]
Post by Olivier Matz
I don't quite like this macro CACHE_LINE_MIN_SIZE. For me, it does not
mean anything. The reasons for aligning on a cache line size are
straightforward, but when should we need to align on the minimum
cache line size supported by dpdk? For instance, in mbuf structure,
aligning on 64 would make more sense to me.
So, I would prefer using (RTE_CACHE_LINE_SIZE * 2) here. If we don't
want it on some architectures, or if this optimization is only for Intel
(or all archs that need this optim), I think we could have something
/* bla bla */
#ifdef INTEL
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
#else
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
#endif
I would agree that CACHE_LINE_MIN_SIZE probably doesn't make any sense
here, but I'm happy to put in any suitable scheme that others are happy
adv: simplest option, disadv: wastes 128B * 2 on some platforms
adv: no ifdefs, disadv: doesn't make much sense logically here
adv: each platform gets what's best for it, disadv: untidy code, may
be harder to maintain
adv: short and simple, disadv: misses any logical reason why 128 is
used, i.e. magic number
I'm ok with any option above.
I'd vote for "Keep as-is" or "Use ifdefs".
I'd vote for "Use ifdefs", default configuration can be "RTE_CACHE_LINE_SIZE
* 2" but a target can override if required.
Post by Olivier Matz
Olivier
Bruce Richardson
2017-03-01 12:12:53 UTC
Permalink
Post by Jerin Jacob
Post by Olivier Matz
Post by Bruce Richardson
Post by Olivier Matz
On Wed, 1 Mar 2017 09:47:03 +0000, Bruce Richardson
Post by Bruce Richardson
So given that there is not much difference here, is the MIN_SIZE i.e.
forced 64B, your preference, rather than actual cacheline-size?
[...]
Post by Olivier Matz
I don't quite like this macro CACHE_LINE_MIN_SIZE. For me, it does not
mean anything. The reasons for aligning on a cache line size are
straightforward, but when should we need to align on the minimum
cache line size supported by dpdk? For instance, in mbuf structure,
aligning on 64 would make more sense to me.
So, I would prefer using (RTE_CACHE_LINE_SIZE * 2) here. If we don't
want it on some architectures, or if this optimization is only for Intel
(or all archs that need this optim), I think we could have something
/* bla bla */
#ifdef INTEL
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE * 2)
#else
#define __rte_ring_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
#endif
I would agree that CACHE_LINE_MIN_SIZE probably doesn't make any sense
here, but I'm happy to put in any suitable scheme that others are happy
adv: simplest option, disadv: wastes 128B * 2 on some platforms
adv: no ifdefs, disadv: doesn't make much sense logically here
adv: each platform gets what's best for it, disadv: untidy code, may
be harder to maintain
adv: short and simple, disadv: misses any logical reason why 128 is
used, i.e. magic number
I'm ok with any option above.
I'd vote for "Keep as-is" or "Use ifdefs".
I'd vote for "Use ifdefs", default configuration can be "RTE_CACHE_LINE_SIZE
* 2" but a target can override if required.
Ok. Will update with some #ifdefs in v2.

/Bruce
Bruce Richardson
2017-03-07 11:32:03 UTC
Permalink
NOTE: this set depends on the v2 cleanup set sent previously.
http://dpdk.org/ml/archives/dev/2017-February/thread.html#58200

This patchset makes a set of, sometimes non-backward compatible, cleanup
changes to the rte_ring code in order to improve it. The resulting code is
shorter, since the existing functions are restructured to reduce code
duplication, as well as being more consistent in behaviour. The specific
changes made are explained in each patch which makes that change.

Changes in V2:
* Eliminated extra cacheline padding where cachelines are 128B
* Renamed rte_ring_ht_ptr struct to rte_ring_headtail
* Removed missed references to ring watermarks in test code and docs

This patchset is largely the same as that posted previously on-list as
an RFC:
http://dpdk.org/ml/archives/dev/2017-February/thread.html#56982

Changes in V1 from RFC:
* Included release notes updates as changes are made in each patch
* Fixed some missed comment updates when changing the code
* Separated some initial fixup patches from this set to send separately
* Dropped the final two patches for an rte_event_ring, as not relevant
for this set. That can be done as a separate set later.
* The macros for copying the pointers have an extra parameter added,
indicating the start of the ring buffer itself. This allows more
flexibility for reusing them in other ring implementations.

Bruce Richardson (14):
ring: remove split cacheline build setting
ring: create common structure for prod and cons metadata
ring: eliminate duplication of size and mask fields
ring: remove debug setting
ring: remove the yield when waiting for tail update
ring: remove watermark support
ring: make bulk and burst fn return vals consistent
ring: allow enqueue fns to return free space value
ring: allow dequeue fns to return remaining entry count
examples/quota_watermark: use ring space for watermarks
ring: reduce scope of local variables
ring: separate out head index manipulation for enq/deq
ring: create common function for updating tail idx
ring: make ring struct and enq/deq macros type agnostic

app/pdump/main.c | 2 +-
config/common_base | 3 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 -
doc/guides/prog_guide/ring_lib.rst | 15 -
doc/guides/rel_notes/release_17_05.rst | 32 +
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 4 +-
examples/distributor/main.c | 5 +-
examples/load_balancer/runtime.c | 34 +-
.../client_server_mp/mp_client/client.c | 9 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 13 +-
examples/qos_sched/app_thread.c | 14 +-
examples/quota_watermark/qw/init.c | 5 +-
examples/quota_watermark/qw/main.c | 21 +-
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 +-
examples/quota_watermark/qwctl/qwctl.c | 2 +
examples/quota_watermark/qwctl/qwctl.h | 1 +
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 5 +-
lib/librte_mempool/rte_mempool_ring.c | 12 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 34 +-
lib/librte_ring/rte_ring.c | 76 +--
lib/librte_ring/rte_ring.h | 760 ++++++++-------------
test/test-pipeline/pipeline_hash.c | 5 +-
test/test-pipeline/runtime.c | 19 +-
test/test/autotest_test_funcs.py | 7 -
test/test/commands.c | 52 --
test/test/test_link_bonding_mode4.c | 6 +-
test/test/test_pmd_ring_perf.c | 12 +-
test/test/test_ring.c | 704 +++----------------
test/test/test_ring_perf.c | 36 +-
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 12 +-
test/test/virtual_pmd.c | 8 +-
43 files changed, 554 insertions(+), 1388 deletions(-)
--
2.9.3
Bruce Richardson
2017-03-07 11:32:07 UTC
Permalink
The debug option only provided statistics to the user, most of
which could be tracked by the application itself. Remove it, both as a
compile-time option and as a feature, simplifying the code.
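
For reference, an application that still wants such counters can keep them
itself with a thin wrapper; a minimal sketch follows, with illustrative names
and a single global counter rather than the per-lcore counters the library
kept (so not thread-safe as written):

static uint64_t app_enq_objs, app_enq_fail_objs;

static inline int
app_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n)
{
	int ret = rte_ring_enqueue_bulk(r, obj_table, n);

	if (ret == -ENOBUFS)
		app_enq_fail_objs += n;	/* nothing was enqueued */
	else
		app_enq_objs += n;	/* objects were enqueued (0 or -EDQUOT) */
	return ret;
}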

Signed-off-by: Bruce Richardson <***@intel.com>
---
config/common_base | 1 -
doc/guides/prog_guide/ring_lib.rst | 7 -
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.c | 41 ----
lib/librte_ring/rte_ring.h | 97 +-------
test/test/test_ring.c | 410 ---------------------------------
6 files changed, 13 insertions(+), 544 deletions(-)

diff --git a/config/common_base b/config/common_base
index 099ffda..b3d8272 100644
--- a/config/common_base
+++ b/config/common_base
@@ -447,7 +447,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f69753..d4ab502 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,

This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.

-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index ea45e0c..e0ebd71 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -116,6 +116,7 @@ API Changes
have been made to it:

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+ * removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 80fc356..90ee63f 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif

/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats sum;
- unsigned lcore_id;
-#endif
-
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " watermark=0\n");
else
fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
-
- /* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
- sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
- sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
- sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
- sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
- sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
- sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
- sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
- sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
- sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
- }
- fprintf(f, " size=%"PRIu32"\n", r->size);
- fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
- fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
- fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
- fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
- fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
- fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
- fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
- fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
- fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
- fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
- fprintf(f, " no statistics available\n");
-#endif
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 61c0982..af7b7d4 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};

-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -187,10 +169,6 @@ struct rte_ring {
/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
* about compiler re-ordering */
@@ -202,27 +180,6 @@ struct rte_ring {
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
-/**
* Calculate the memory size needed for a ring
*
* This function returns the number of bytes needed for a ring, given
@@ -463,17 +420,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -488,15 +440,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

/*
* If there are other enqueues in progress that preceded us,
@@ -560,17 +508,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -583,15 +526,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

r->prod.tail = prod_next;
return ret;
@@ -655,16 +594,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

/* Set the actual entries for dequeue */
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -694,7 +628,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
sched_yield();
}
}
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -741,16 +674,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = prod_tail - cons_head;

if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -762,7 +690,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 5f09097..3891f5d 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
return -1;
}

-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
- printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
- return 0;
-#else
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
- unsigned num_items = 0;
- unsigned failed_enqueue_ops = 0;
- unsigned failed_enqueue_items = 0;
- unsigned failed_dequeue_ops = 0;
- unsigned failed_dequeue_items = 0;
- unsigned last_enqueue_ops = 0;
- unsigned last_enqueue_items = 0;
- unsigned last_quota_ops = 0;
- unsigned last_quota_items = 0;
- unsigned lcore_id = rte_lcore_id();
- struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
- printf("Test the ring stats.\n");
-
- /* Reset the watermark in case it was set in another test. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Allocate some dummy object pointers. */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
-
- /* Allocate some memory for copied objects. */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
- /* Set the head and tail pointers. */
- cur_src = src;
- cur_dst = dst;
-
- /* Do Enqueue tests. */
- printf("Test the dequeue stats.\n");
-
- /* Fill the ring up to RING_SIZE -1. */
- printf("Fill the ring.\n");
- for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
- rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- }
-
- /* Adjust for final enqueue = MAX_BULK -1. */
- cur_src--;
-
- printf("Verify that the ring is full.\n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
-
- printf("Verify the enqueue success stats.\n");
- /* Stats should match above enqueue operations to fill the ring. */
- if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Current max objects is RING_SIZE -1. */
- if (ring_stats->enq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any failures yet. */
- if (ring_stats->enq_fail_bulk != 0)
- goto fail;
- if (ring_stats->enq_fail_objs != 0)
- goto fail;
-
-
- printf("Test stats for SP burst enqueue to a full ring.\n");
- num_items = 2;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for SP bulk enqueue to a full ring.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP burst enqueue to a full ring.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP bulk enqueue to a full ring.\n");
- num_items = 16;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- /* Do Dequeue tests. */
- printf("Test the dequeue stats.\n");
-
- printf("Empty the ring.\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* There was only RING_SIZE -1 objects to dequeue. */
- cur_dst++;
-
- printf("Verify ring is empty.\n");
- if (1 != rte_ring_empty(r))
- goto fail;
-
- printf("Verify the dequeue success stats.\n");
- /* Stats should match above dequeue operations. */
- if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Objects dequeued is RING_SIZE -1. */
- if (ring_stats->deq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any dequeue failure stats yet. */
- if (ring_stats->deq_fail_bulk != 0)
- goto fail;
-
- printf("Test stats for SC burst dequeue with an empty ring.\n");
- num_items = 2;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for SC bulk dequeue with an empty ring.\n");
- num_items = 4;
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC burst dequeue with an empty ring.\n");
- num_items = 8;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC bulk dequeue with an empty ring.\n");
- num_items = 16;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test total enqueue/dequeue stats.\n");
- /* At this point the enqueue and dequeue stats should be the same. */
- if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
- goto fail;
- if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
- goto fail;
- if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
- goto fail;
- if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
- goto fail;
-
-
- /* Watermark Tests. */
- printf("Test the watermark/quota stats.\n");
-
- printf("Verify the initial watermark stats.\n");
- /* Watermark stats should be 0 since there is no watermark. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Set a watermark. */
- rte_ring_set_water_mark(r, 16);
-
- /* Reset pointers. */
- cur_src = src;
- cur_dst = dst;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue below watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should still be 0. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Success stats should have increased. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
- goto fail;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue at watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != 1)
- goto fail;
- if (ring_stats->enq_quota_objs != num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP burst enqueue above watermark.\n");
- num_items = 1;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP burst enqueue above watermark.\n");
- num_items = 2;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP bulk enqueue above watermark.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP bulk enqueue above watermark.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- printf("Test watermark success stats.\n");
- /* Success stats should be same as last non-watermarked enqueue. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items)
- goto fail;
-
-
- /* Cleanup. */
-
- /* Empty the ring. */
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* Reset the watermark. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
-fail:
- free(src);
- free(dst);
- return -1;
-#endif
-}
-
/*
* it will always fail to create ring with a wrong ring size number in this function
*/
@@ -1335,10 +929,6 @@ test_ring(void)
if (test_ring_basic() < 0)
return -1;

- /* ring stats */
- if (test_ring_stats() < 0)
- return -1;
-
/* basic operations */
if (test_live_watermark_change() < 0)
return -1;
--
2.9.3
Bruce Richardson
2017-03-07 11:32:08 UTC
Permalink
There was a compile time setting to enable a ring to yield when
it entered a loop in mp or mc rings waiting for the tail pointer update.
Build time settings are not recommended for enabling/disabling features,
and since this was off by default, remove it completely. If needed, a
runtime enabled equivalent can be used.
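
For reference, a runtime-enabled equivalent inside the multi-producer
enqueue would look roughly like the sketch below (hypothetical, not part of
this patch; yield_threshold stands for some runtime-configured value rather
than a build-time constant):

	unsigned int rep = 0;

	/* wait for preceding enqueues to finish, yielding occasionally */
	while (unlikely(r->prod.tail != prod_head)) {
		rte_pause();
		if (yield_threshold != 0 && ++rep == yield_threshold) {
			rep = 0;
			sched_yield();
		}
	}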

Signed-off-by: Bruce Richardson <***@intel.com>
---
config/common_base | 1 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 ----
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.h | 35 +++++--------------------
4 files changed, 7 insertions(+), 35 deletions(-)

diff --git a/config/common_base b/config/common_base
index b3d8272..d5beadd 100644
--- a/config/common_base
+++ b/config/common_base
@@ -447,7 +447,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
# Compile librte_mempool
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 10a10a8..7c39cd2 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -352,11 +352,6 @@ Known Issues

3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.

- ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
- It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
- This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
+ rte_timer

Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e0ebd71..c69ca8f 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -117,6 +117,7 @@ API Changes

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
+ * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index af7b7d4..2177954 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -114,11 +114,6 @@ enum rte_ring_queue_behavior {
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)

-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
-#endif
-
struct rte_memzone; /* forward declaration, so as not to require memzone.h */

#if RTE_CACHE_LINE_SIZE < 128
@@ -396,7 +391,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -450,18 +445,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head)) {
+ while (unlikely(r->prod.tail != prod_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->prod.tail = prod_next;
return ret;
}
@@ -494,7 +480,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -571,7 +557,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -616,18 +602,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head)) {
+ while (unlikely(r->cons.tail != cons_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -662,7 +639,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-03-07 11:32:04 UTC
Permalink
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. On platforms with 64B
cachelines, for improved performance use 128B rather than 64B alignment
since it stops the producer and consumer data being on adjacent cachelines.

Signed-off-by: Bruce Richardson <***@intel.com>

---

V2: Limit the cacheline * 2 alignment to platforms with < 128B line size
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 6 ++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 16 ++++++++++------
4 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/config/common_base b/config/common_base
index aeee13e..099ffda 100644
--- a/config/common_base
+++ b/config/common_base
@@ -448,7 +448,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index e25ea9f..ea45e0c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -110,6 +110,12 @@ API Changes
Also, make sure to start the actual text at the margin.
=========================================================

+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+ have been made to it:
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..399ae3b 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -139,6 +139,14 @@ struct rte_ring_debug_stats {

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

+#if RTE_CACHE_LINE_SIZE < 128
+#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#else
+#define PROD_ALIGN RTE_CACHE_LINE_SIZE
+#define CONS_ALIGN RTE_CACHE_LINE_SIZE
+#endif
+
/**
* An RTE ring structure.
*
@@ -168,7 +176,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(PROD_ALIGN);

/** Ring consumer status. */
struct cons {
@@ -177,11 +185,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Consumer head. */
volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
+ } cons __rte_aligned(CONS_ALIGN);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
Bruce Richardson
2017-03-07 11:32:05 UTC
Permalink
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.

Signed-off-by: Bruce Richardson <***@intel.com>

---

V2: renamed the shared structure based on maintainer feedback.
---
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 399ae3b..659c6d0 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,6 +147,19 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif

+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_headtail {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */
+};
+
/**
* An RTE ring structure.
*
@@ -169,23 +182,10 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */

/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_aligned(PROD_ALIGN);
+ struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);

/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
- } cons __rte_aligned(CONS_ALIGN);
+ struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
Thomas Monjalon
2017-03-15 14:01:49 UTC
Permalink
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
error: anonymous unions are a C11 extension
Bruce Richardson
2017-03-22 16:38:41 UTC
Permalink
Post by Thomas Monjalon
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
error: anonymous unions are a C11 extension
What clang version and other CFLAGS settings are you using? Clang
compilation runs fine for me with clang 3.9.1 on Fedora 25.

/Bruce
Bruce Richardson
2017-03-24 14:55:36 UTC
Permalink
Post by Thomas Monjalon
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
error: anonymous unions are a C11 extension
Olivier, Thomas, feedback on suggestions for fixing this? Note: I'm
still waiting to hear back on what compiler settings are needed to
trigger this error.

Two immediately obvious options:
* replace the union with a single variable called e.g. "single", i.e.
prod.single indicates single producer, and cons.single indicates
single consumer. The downside of this approach is that it makes the
patch a little bigger - as other code needs to be modified to use the
new name - and is not backward compatible for apps which
may reference this public structure member.
* just remove the union without renaming anything, leaving two structure
members called sp_enqueue and sc_dequeue. This uses a little more
space in the structure, which is not a big deal since it needs to fill
a cacheline anyway, but it is backward compatible in that no other
code should need to be modified.

Other options? My preference is for the first one. Given we are breaking
the ring API anyway, I think we might as well use the shorter name and
eliminate the need for the union, or multiple variables.

/Bruce
Olivier Matz
2017-03-24 16:41:34 UTC
Permalink
Hi Bruce,
Post by Bruce Richardson
Post by Thomas Monjalon
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
error: anonymous unions are a C11 extension
Olivier, Thomas, feedback on suggestions for fixing this? Note: I'm
still waiting to hear back on what compiler settings are needed to
trigger this error.
* replace the union with a single variable called e.g. "single", i.e.
prod.single indicates single producer, and cons.single indicates
single consumer. The downside of this approach is that it makes the
patch a little bigger - as other code needs to be modified to use the
new name - and is not backward compatible for apps which
may reference this public structure member.
* just remove the union without renaming anything, leaving two structure
members called sp_enqueue and sc_dequeue. This uses a little more
space in the structure, which is not a big deal since it needs to fill
a cacheline anyway, but it is backward compatible in that no other
code should need to be modified.
Other options? My preference is for the first one. Given we are breaking
the ring API anyway, I think we might as well use the shorter name and
eliminate the need for the union, or multiple variables.
What about adding RTE_STD_C11 like it's done in rte_mbuf?

I didn't try, but since mbuf compiles, it should solve this issue in ring.
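
For reference, that would presumably just mean tagging the union the way
rte_mbuf does, e.g. (untested sketch):

	RTE_STD_C11
	union {
		uint32_t sp_enqueue; /**< True, if single producer. */
		uint32_t sc_dequeue; /**< True, if single consumer. */
	};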


Regards,
Olivier
Bruce Richardson
2017-03-24 16:57:18 UTC
Permalink
Post by Olivier Matz
Hi Bruce,
Post by Bruce Richardson
Post by Thomas Monjalon
+ union {
+ uint32_t sp_enqueue; /**< True, if single producer. */
+ uint32_t sc_dequeue; /**< True, if single consumer. */
+ };
error: anonymous unions are a C11 extension
Olivier, Thomas, feedback on suggestions for fixing this? Note: I'm
still waiting to hear back on what compiler settings are needed to
trigger this error.
* replace the union with a single variable called e.g. "single", i.e.
prod.single indicates single producer, and cons.single indicates
single consumer. The downside of this approach is that it makes the
patch a little bigger - as other code needs to be modified to use the
new name - and is not backward compatible for apps which
may reference this public structure member.
* just remove the union without renaming anything, leaving two structure
members called sp_enqueue and sc_dequeue. This uses a little more
space in the structure, which is not a big deal since it needs to fill
a cacheline anyway, but it is backward compatible in that no other
code should need to be modified.
Other options? My preference is for the first one. Given we are breaking
the ring API anyway, I think we might as well use the shorter name and
eliminate the need for the union, or multiple variables.
What about adding RTE_STD_C11 like it's done in rte_mbuf?
I didn't try, but since mbuf compiles, it should solve this issue in ring.
Yes, it might well. However, looking at the resulting code, I actually
think it's cleaner to have just one variable called "single" in the
structure. The union is really only for backward compatibility, and there is
little point in keeping it since we are changing the rest of the structure
in other ways.

Struct now looks like:
/* structure to hold a pair of head/tail values and other metadata */
struct rte_ring_headtail {
volatile uint32_t head; /**< Prod/consumer head. */
volatile uint32_t tail; /**< Prod/consumer tail. */
uint32_t single; /**< True if single prod/cons */
};

And the code checks read e.g. for single producer:

if (r->prod.single)

/Bruce
Bruce Richardson
2017-03-07 11:32:09 UTC
Permalink
Remove the watermark support. A future commit will add support for having
enqueue functions return the amount of free space in the ring, which will
allow applications to implement their own watermark checks, while also
being more useful to the app.
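
As a rough sketch, such an application-side check can already be done with
rte_ring_free_count() (illustrative only; app_watermark and the backpressure
action are placeholders):

	/* producer-side replacement for the ring's built-in watermark */
	if (rte_ring_free_count(r) < app_watermark)
		apply_backpressure();	/* e.g. pause the I/O source */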

Signed-off-by: Bruce Richardson <***@intel.com>

---
V2: fix missed references to watermarks in v1

---
doc/guides/prog_guide/ring_lib.rst | 8 --
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
test/test/autotest_test_funcs.py | 7 --
test/test/commands.c | 52 ------------
test/test/test_ring.c | 149 +--------------------------------
8 files changed, 8 insertions(+), 293 deletions(-)

diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index d4ab502..b31ab7a 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -102,14 +102,6 @@ Name
A ring is identified by a unique name.
It is not possible to create two rings with the same name (rte_ring_create() returns NULL if this is attempted).

-Water Marking
-~~~~~~~~~~~~~
-
-The ring can have a high water mark (threshold).
-Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
-
-This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index c69ca8f..4e748dc 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -118,6 +118,8 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
+ * removed the function ``rte_ring_set_water_mark`` as part of a general
+ removal of watermarks support in the library.

ABI Changes
-----------
diff --git a/examples/Makefile b/examples/Makefile
index da2bfdd..19cd5ad 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-DIRS-y += quota_watermark
+#DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 90ee63f..18fb644 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,7 +138,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->watermark = count;
r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
r->size = count;
@@ -256,24 +255,6 @@ rte_ring_free(struct rte_ring *r)
rte_free(te);
}

-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
-{
- if (count >= r->size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->size;
-
- r->watermark = count;
- return 0;
-}
-
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
@@ -287,10 +268,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->watermark == r->size)
- fprintf(f, " watermark=0\n");
- else
- fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 2177954..e7061be 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -156,7 +156,6 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -171,7 +170,6 @@ struct rte_ring {

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
@@ -277,26 +275,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);

/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -377,8 +355,6 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -393,7 +369,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
int success;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -434,13 +409,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
/*
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
@@ -449,7 +417,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -468,8 +436,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -482,7 +448,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_next, free_entries;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

prod_head = r->prod.head;
cons_tail = r->cons.tail;
@@ -511,15 +476,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -685,8 +643,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -707,8 +663,6 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -733,8 +687,6 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -759,8 +711,6 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -778,8 +728,6 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -801,8 +749,6 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
diff --git a/test/test/autotest_test_funcs.py b/test/test/autotest_test_funcs.py
index 1c5f390..8da8fcd 100644
--- a/test/test/autotest_test_funcs.py
+++ b/test/test/autotest_test_funcs.py
@@ -292,11 +292,4 @@ def ring_autotest(child, test_name):
elif index == 2:
return -1, "Fail [Timeout]"

- child.sendline("set_watermark test 100")
- child.sendline("dump_ring test")
- index = child.expect([" watermark=100",
- pexpect.TIMEOUT], timeout=1)
- if index != 0:
- return -1, "Fail [Bad watermark]"
-
return 0, "Success"
diff --git a/test/test/commands.c b/test/test/commands.c
index 2df46b0..551c81d 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -228,57 +228,6 @@ cmdline_parse_inst_t cmd_dump_one = {

/****************/

-struct cmd_set_ring_result {
- cmdline_fixed_string_t set;
- cmdline_fixed_string_t name;
- uint32_t value;
-};
-
-static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_ring_result *res = parsed_result;
- struct rte_ring *r;
- int ret;
-
- r = rte_ring_lookup(res->name);
- if (r == NULL) {
- cmdline_printf(cl, "Cannot find ring\n");
- return;
- }
-
- if (!strcmp(res->set, "set_watermark")) {
- ret = rte_ring_set_water_mark(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set water mark\n");
- }
-}
-
-cmdline_parse_token_string_t cmd_set_ring_set =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_watermark");
-
-cmdline_parse_token_string_t cmd_set_ring_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
-
-cmdline_parse_token_num_t cmd_set_ring_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);
-
-cmdline_parse_inst_t cmd_set_ring = {
- .f = cmd_set_ring_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "set watermark: "
- "set_watermark <ring_name> <value>",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_set_ring_set,
- (void *)&cmd_set_ring_name,
- (void *)&cmd_set_ring_value,
- NULL,
- },
-};
-
-/****************/
-
struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
@@ -419,7 +368,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_autotest,
(cmdline_parse_inst_t *)&cmd_dump,
(cmdline_parse_inst_t *)&cmd_dump_one,
- (cmdline_parse_inst_t *)&cmd_set_ring,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_set_rxtx,
(cmdline_parse_inst_t *)&cmd_set_rxtx_anchor,
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 3891f5d..666a451 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -78,21 +78,6 @@
* - Dequeue one object, two objects, MAX_BULK objects
* - Check that dequeued pointers are correct
*
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check live watermark change
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of watermark.
- * - At the same time, change the watermark on the master lcore.
- * - The slave lcore will check that watermark changes from 16 to 32.
- *
* #. Performance tests.
*
* Tests done in test_ring_perf.c
@@ -115,123 +100,6 @@ static struct rte_ring *r;

#define TEST_RING_FULL_EMTPY_ITER 8

-static int
-check_live_watermark_change(__attribute__((unused)) void *dummy)
-{
- uint64_t hz = rte_get_timer_hz();
- void *obj_table[MAX_BULK];
- unsigned watermark, watermark_old = 16;
- uint64_t cur_time, end_time;
- int64_t diff = 0;
- int i, ret;
- unsigned count = 4;
-
- /* init the object table */
- memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz / 4);
-
- /* check that bulk and watermark are 4 and 32 (respectively) */
- while (diff >= 0) {
-
- /* add in ring until we reach watermark */
- ret = 0;
- for (i = 0; i < 16; i ++) {
- if (ret != 0)
- break;
- ret = rte_ring_enqueue_bulk(r, obj_table, count);
- }
-
- if (ret != -EDQUOT) {
- printf("Cannot enqueue objects, or watermark not "
- "reached (ret=%d)\n", ret);
- return -1;
- }
-
- /* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->watermark;
- if (watermark != watermark_old &&
- (watermark_old != 16 || watermark != 32)) {
- printf("Bad watermark change %u -> %u\n", watermark_old,
- watermark);
- return -1;
- }
- watermark_old = watermark;
-
- /* dequeue objects from ring */
- while (i--) {
- ret = rte_ring_dequeue_bulk(r, obj_table, count);
- if (ret != 0) {
- printf("Cannot dequeue (ret=%d)\n", ret);
- return -1;
- }
- }
-
- cur_time = rte_get_timer_cycles();
- diff = end_time - cur_time;
- }
-
- if (watermark_old != 32 ) {
- printf(" watermark was not updated (wm=%u)\n",
- watermark_old);
- return -1;
- }
-
- return 0;
-}
-
-static int
-test_live_watermark_change(void)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
-
- printf("Test watermark live modification\n");
- rte_ring_set_water_mark(r, 16);
-
- /* launch a thread that will enqueue and dequeue, checking
- * watermark and quota */
- rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
-
- rte_delay_ms(100);
- rte_ring_set_water_mark(r, 32);
- rte_delay_ms(100);
-
- if (rte_eal_wait_lcore(lcore_id2) < 0)
- return -1;
-
- return 0;
-}
-
-/* Test for catch on invalid watermark values */
-static int
-test_set_watermark( void ){
- unsigned count;
- int setwm;
-
- struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
- if(r == NULL){
- printf( " ring lookup failed\n" );
- goto error;
- }
- count = r->size * 2;
- setwm = rte_ring_set_water_mark(r, count);
- if (setwm != -EINVAL){
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
-
- count = 0;
- rte_ring_set_water_mark(r, count);
- if (r->watermark != r->size) {
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
- return 0;
-
-error:
- return -1;
-}
-
/*
* helper routine for test_ring_basic
*/
@@ -418,8 +286,7 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- printf("test watermark and default bulk enqueue / dequeue\n");
- rte_ring_set_water_mark(r, 20);
+ printf("test default bulk enqueue / dequeue\n");
num_elems = 16;

cur_src = src;
@@ -433,8 +300,8 @@ test_ring_basic(void)
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != -EDQUOT) {
- printf("Watermark not exceeded\n");
+ if (ret != 0) {
+ printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
@@ -930,16 +797,6 @@ test_ring(void)
return -1;

/* basic operations */
- if (test_live_watermark_change() < 0)
- return -1;
-
- if ( test_set_watermark() < 0){
- printf ("Test failed to detect invalid parameter\n");
- return -1;
- }
- else
- printf ( "Test detected forced bad watermark values\n");
-
if ( test_create_count_odd() < 0){
printf ("Test failed to detect odd count\n");
return -1;
--
2.9.3
Bruce Richardson
2017-03-07 11:32:06 UTC
Permalink
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of those structures and into the
top-level structure so they are not duplicated.
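
A simplified sketch of the resulting layout, for illustration only (fields
abbreviated; the per-ring data is now held once at the top level):

        /* shared head/tail metadata; size and mask no longer live here */
        struct rte_ring_headtail {
                volatile uint32_t head;      /* producer/consumer head */
                volatile uint32_t tail;      /* producer/consumer tail */
                union {
                        uint32_t sp_enqueue; /* true if single producer */
                        uint32_t sc_dequeue; /* true if single consumer */
                };
        };

        struct rte_ring {
                char name[RTE_MEMZONE_NAMESIZE];
                int flags;
                const struct rte_memzone *memzone;
                uint32_t size;      /* size of ring, held once */
                uint32_t mask;      /* mask (size-1) of ring, held once */
                uint32_t watermark; /* max items before EDQUOT in producer */

                struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
                struct rte_ring_headtail cons; /* similarly cache aligned */

                /* ... debug stats and ring storage follow ... */
        };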

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.c | 20 ++++++++++----------
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
test/test/test_ring.c | 6 +++---
3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 4bc6da1..80fc356 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -144,11 +144,11 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.watermark = count;
+ r->watermark = count;
r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
- r->prod.size = r->cons.size = count;
- r->prod.mask = r->cons.mask = count-1;
+ r->size = count;
+ r->mask = count - 1;
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;

@@ -269,14 +269,14 @@ rte_ring_free(struct rte_ring *r)
int
rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
{
- if (count >= r->prod.size)
+ if (count >= r->size)
return -EINVAL;

/* if count is 0, disable the watermarking */
if (count == 0)
- count = r->prod.size;
+ count = r->size;

- r->prod.watermark = count;
+ r->watermark = count;
return 0;
}

@@ -291,17 +291,17 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)

fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->prod.watermark == r->prod.size)
+ if (r->watermark == r->size)
fprintf(f, " watermark=0\n");
else
- fprintf(f, " watermark=%"PRIu32"\n", r->prod.watermark);
+ fprintf(f, " watermark=%"PRIu32"\n", r->watermark);

/* sum and dump statistics */
#ifdef RTE_LIBRTE_RING_DEBUG
@@ -318,7 +318,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
}
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 659c6d0..61c0982 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -151,13 +151,10 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
struct rte_ring_headtail {
volatile uint32_t head; /**< Prod/consumer head. */
volatile uint32_t tail; /**< Prod/consumer tail. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
union {
uint32_t sp_enqueue; /**< True, if single producer. */
uint32_t sc_dequeue; /**< True, if single consumer. */
};
- uint32_t watermark; /**< Max items before EDQUOT in producer. */
};

/**
@@ -177,9 +174,12 @@ struct rte_ring {
* next time the ABI changes
*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -358,7 +358,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
+ const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
@@ -385,7 +385,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+ const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
obj_table[i] = r->ring[idx]; \
@@ -440,7 +440,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -488,7 +488,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -547,7 +547,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

prod_head = r->prod.head;
@@ -583,7 +583,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -633,7 +633,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -730,7 +730,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

cons_head = r->cons.head;
prod_tail = r->prod.tail;
@@ -1059,7 +1059,7 @@ rte_ring_full(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}

/**
@@ -1092,7 +1092,7 @@ rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}

/**
@@ -1108,7 +1108,7 @@ rte_ring_free_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
}

/**
@@ -1122,7 +1122,7 @@ rte_ring_free_count(const struct rte_ring *r)
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
- return r->prod.size;
+ return r->size;
}

/**
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index ebcb896..5f09097 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -148,7 +148,7 @@ check_live_watermark_change(__attribute__((unused)) void *dummy)
}

/* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->prod.watermark;
+ watermark = r->watermark;
if (watermark != watermark_old &&
(watermark_old != 16 || watermark != 32)) {
printf("Bad watermark change %u -> %u\n", watermark_old,
@@ -213,7 +213,7 @@ test_set_watermark( void ){
printf( " ring lookup failed\n" );
goto error;
}
- count = r->prod.size*2;
+ count = r->size * 2;
setwm = rte_ring_set_water_mark(r, count);
if (setwm != -EINVAL){
printf("Test failed to detect invalid watermark count value\n");
@@ -222,7 +222,7 @@ test_set_watermark( void ){

count = 0;
rte_ring_set_water_mark(r, count);
- if (r->prod.watermark != r->prod.size) {
+ if (r->watermark != r->size) {
printf("Test failed to detect invalid watermark count value\n");
goto error;
}
--
2.9.3
Bruce Richardson
2017-03-07 11:32:10 UTC
Permalink
The bulk functions for rings return 0 when all elements are enqueued and a
negative error code when there is no space. Change that to make them
consistent with the burst functions by returning the number of elements
enqueued or dequeued, i.e. 0 or N. This change also allows the return value
from enqueue/dequeue to be used directly without a branch for error checking.
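
As a minimal illustration (not part of the patch) of using the new return
value directly, assuming a ring "r" and a hypothetical process_objs()
consumer:

        #include <rte_ring.h>

        #define BURST 32

        /* New semantics: the bulk call returns the element count (0 or BURST
         * here), so it can be passed straight through without first checking
         * an error code, as the old 0/-ENOENT return required. */
        static void
        drain_once(struct rte_ring *r,
                        void (*process_objs)(void **objs, unsigned int n))
        {
                void *objs[BURST];
                unsigned int n = rte_ring_sc_dequeue_bulk(r, objs, BURST);

                process_objs(objs, n); /* n is 0 if fewer than BURST were available */
        }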

Signed-off-by: Bruce Richardson <***@intel.com>
---
doc/guides/rel_notes/release_17_05.rst | 11 +++
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
examples/load_balancer/runtime.c | 16 ++-
.../client_server_mp/mp_client/client.c | 8 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/qos_sched/app_thread.c | 8 +-
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 12 ++-
lib/librte_ring/rte_ring.h | 109 +++++++--------------
test/test-pipeline/pipeline_hash.c | 2 +-
test/test-pipeline/runtime.c | 8 +-
test/test/test_ring.c | 46 +++++----
test/test/test_ring_perf.c | 8 +-
14 files changed, 106 insertions(+), 130 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 4e748dc..2b11765 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -120,6 +120,17 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * changed the return value of the enqueue and dequeue bulk functions to
+ match that of the burst equivalents. In all cases, ring functions which
+ operate on multiple packets now return the number of elements enqueued
+ or dequeued, as appropriate. The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``

ABI Changes
-----------
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index 9b69cfe..e3a63c8 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325..82b10bc 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (
(void **) lp->rx.mbuf_out[worker].array,
bsz);

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -349,9 +349,8 @@ app_lcore_io_tx(
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

n_mbufs += bsz_rd;

@@ -505,9 +504,8 @@ app_lcore_worker(
(void **) lp->mbuf_in.array,
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -559,7 +557,7 @@ app_lcore_worker(

#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +570,7 @@ app_lcore_worker(
}
#endif

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca3..dca9eb9 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,10 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");

for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;

- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d..19c95b2 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb..dab4594 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -180,7 +180,7 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

conf->counter = 0; /* reset empty read loop counter */
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt) == 0)
+ ; /* empty body */

conf_idx++;
if (confs[conf_idx] == NULL)
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index a6c0c70..9ec6a05 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) != 0))
+ rx_pkts) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 1a54d1b..3eb7fac 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..409b860 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -42,26 +42,30 @@ static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index e7061be..5f6589f 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -352,14 +352,10 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -391,7 +387,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -417,7 +413,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -433,14 +429,10 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -460,7 +452,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -477,7 +469,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -498,16 +490,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/

-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -539,7 +526,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* Set the actual entries for dequeue */
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -565,7 +552,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

r->cons.tail = cons_next;

- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -583,15 +570,10 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -610,7 +592,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,

if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -626,7 +608,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -642,10 +624,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -662,10 +643,9 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -686,10 +666,9 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -716,7 +695,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -733,7 +712,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -754,10 +733,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -773,11 +749,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -794,11 +768,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -818,11 +790,9 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
if (r->cons.sc_dequeue)
@@ -849,7 +819,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -867,7 +837,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -889,10 +859,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 10d2869..1ac0aa8 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -547,6 +547,6 @@ app_main_loop_rx_metadata(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 42a6142..4e20669 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -98,7 +98,7 @@ app_main_loop_rx(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -123,7 +123,7 @@ app_main_loop_worker(void) {
(void **) worker_mbuf->array,
app.burst_size_worker_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

do {
@@ -131,7 +131,7 @@ app_main_loop_worker(void) {
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
app.burst_size_worker_write);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -152,7 +152,7 @@ app_main_loop_tx(void) {
(void **) &app.mbuf_tx[i].array[n_mbufs],
app.burst_size_tx_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

n_mbufs += app.burst_size_tx_read;
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 666a451..112433b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,20 +117,18 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -171,37 +169,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -217,37 +215,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -264,11 +262,11 @@ test_ring_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
}

@@ -294,25 +292,25 @@ test_ring_basic(void)

ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
}
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 320c20c..8ccbdef 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();
--
2.9.3
Olivier MATZ
2017-03-08 10:22:40 UTC
Permalink
Post by Bruce Richardson
The bulk fns for rings returns 0 for all elements enqueued and negative
for no space. Change that to make them consistent with the burst functions
in returning the number of elements enqueued/dequeued, i.e. 0 or N.
This change also allows the return value from enq/deq to be used directly
without a branch for error checking.
[...]
Post by Bruce Richardson
@@ -716,7 +695,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}
/**
I'm wondering if these functions (enqueue/dequeue of one element) should
be modified to return 0 (fail) or 1 (success) too, for consistency with
the bulk functions.

Any opinion?



Olivier
Bruce Richardson
2017-03-08 12:08:42 UTC
Permalink
Post by Olivier MATZ
Post by Bruce Richardson
The bulk fns for rings returns 0 for all elements enqueued and negative
for no space. Change that to make them consistent with the burst functions
in returning the number of elements enqueued/dequeued, i.e. 0 or N.
This change also allows the return value from enq/deq to be used directly
without a branch for error checking.
[...]
Post by Bruce Richardson
@@ -716,7 +695,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}
/**
I'm wondering if these functions (enqueue/dequeue of one element) should
be modified to return 0 (fail) or 1 (success) too, for consistency with
the bulk functions.
Any opinion?
I thought about that, but I would view it as risky, unless we want to go
changing the parameters to the function also, as the compiler won't flag
a change in return value like that.
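
For illustration only, a sketch of the kind of existing caller that makes
this risky: it is written against today's 0-on-success return, so it would
still compile unchanged if the return silently became 1-on-success, but it
would then free every successfully enqueued packet.

        #include <rte_mbuf.h>
        #include <rte_ring.h>

        static void
        send_one(struct rte_ring *r, struct rte_mbuf *m)
        {
                /* today: a non-zero return means the ring was full */
                if (rte_ring_mp_enqueue(r, m) != 0)
                        rte_pktmbuf_free(m);
        }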

/Bruce
Olivier Matz
2017-03-14 08:56:35 UTC
Permalink
Post by Bruce Richardson
Post by Olivier MATZ
Post by Bruce Richardson
The bulk fns for rings returns 0 for all elements enqueued and negative
for no space. Change that to make them consistent with the burst functions
in returning the number of elements enqueued/dequeued, i.e. 0 or N.
This change also allows the return value from enq/deq to be used directly
without a branch for error checking.
[...]
Post by Bruce Richardson
@@ -716,7 +695,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}
/**
I'm wondering if these functions (enqueue/dequeue of one element) should
be modified to return 0 (fail) or 1 (success) too, for consistency with
the bulk functions.
Any opinion?
I thought about that, but I would view it as risky, unless we want to go
changing the parameters to the function also, as the compiler won't flag
a change in return value like that.
Ok, I have no better solution anyway.

Olivier
Bruce Richardson
2017-03-07 11:32:11 UTC
Permalink
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space in the
ring. This information can be used by applications in a number of ways.
For instance, with single-producer queues, it provides a maximum enqueue
size which is guaranteed to work. It can also be used to implement
watermark functionality in applications, replacing the older built-in
functionality with a more flexible version that enables applications to
implement multiple watermark thresholds rather than just one.
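
For illustration only (not part of the patch), a minimal sketch of an
application-level watermark built on the new output parameter, assuming
that parameter is an unsigned int pointer holding the free-entry count
after the call and that APP_WATERMARK is an application-chosen threshold:

        #include <rte_ring.h>

        #define APP_WATERMARK 64

        /* Enqueue a burst and report whether fewer than APP_WATERMARK free
         * slots remain, i.e. the ring has risen above the watermark. */
        static unsigned int
        enqueue_check_watermark(struct rte_ring *r, void **objs, unsigned int n,
                        int *above_watermark)
        {
                unsigned int free_space;
                unsigned int sent = rte_ring_enqueue_burst(r, objs, n, &free_space);

                *above_watermark = (free_space < APP_WATERMARK);
                return sent;
        }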

Signed-off-by: Bruce Richardson <***@intel.com>
---
doc/guides/rel_notes/release_17_05.rst | 3 +
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 3 +-
examples/load_balancer/runtime.c | 12 ++-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 7 +-
examples/qos_sched/app_thread.c | 4 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 28 ++++---
lib/librte_ring/rte_ring.h | 89 +++++++++++-----------
test/test-pipeline/pipeline_hash.c | 3 +-
test/test-pipeline/runtime.c | 5 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 5 +-
test/test/test_ring.c | 55 ++++++-------
test/test/test_ring_perf.c | 16 ++--
test/test/test_table_ports.c | 4 +-
test/test/virtual_pmd.c | 4 +-
22 files changed, 139 insertions(+), 118 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 2b11765..249ad6e 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -120,6 +120,9 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * added an extra parameter to the burst/bulk enqueue functions to
+ return the number of free spaces in the ring after enqueue. This can
+ be used by an application to implement its own watermark functionality.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 6f9cc1a..adbf478 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -102,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index e7641d2..cfd360b 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -238,7 +238,8 @@ lcore_rx(struct lcore_params *p)
continue;
}

- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs,
+ nb_ret, NULL);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 82b10bc..1645994 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,7 +144,8 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -310,7 +311,8 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -553,7 +555,8 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);

#if APP_STATS
lp->rings_out_iters[port] ++;
@@ -605,7 +608,8 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 19c95b2..c2b0261 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) == 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index d4dc789..d268350 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;

/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;

/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index dab4594..0c81a15 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) == 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -231,7 +231,7 @@ app_worker_thread(struct thread_conf **confs)
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
- (void **)mbufs, nb_pkt) == 0)
+ (void **)mbufs, nb_pkt, NULL) == 0)
; /* empty body */

conf_idx++;
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 3eb7fac..597b4c2 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 51db006..6552199 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -808,7 +808,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
/* Need to enqueue the free slots in global ring. */
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
- LCORE_CACHE_SIZE);
+ LCORE_CACHE_SIZE, NULL);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 409b860..9b8fd2b 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -43,7 +43,7 @@ common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_mp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
@@ -51,7 +51,7 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_sp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index a580a6a..d6d3e46 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -197,7 +197,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
dup_bufs[d_pkts++] = p;
}

- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
if (unlikely(ring_enq < d_pkts)) {
RTE_LOG(DEBUG, PDUMP,
"only %d of packets enqueued to ring\n", ring_enq);
diff --git a/lib/librte_port/rte_port_ras.c b/lib/librte_port/rte_port_ras.c
index c4bb508..4de0945 100644
--- a/lib/librte_port/rte_port_ras.c
+++ b/lib/librte_port/rte_port_ras.c
@@ -167,7 +167,7 @@ send_burst(struct rte_port_ring_writer_ras *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 3b9d3d0..9fadac7 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -241,7 +241,7 @@ send_burst(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -256,7 +256,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -318,11 +318,11 @@ rte_port_ring_writer_tx_bulk_internal(void *port,

RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
- n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
@@ -517,7 +517,7 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -527,7 +527,8 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_sp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -550,7 +551,7 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -560,7 +561,8 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_mp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -633,10 +635,12 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
n_pkts_ok =
- rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
n_pkts_ok =
- rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

if (n_pkts_ok >= n_pkts)
return 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 5f6589f..73b1c26 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -357,20 +357,16 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const unsigned int max = n;
int success;
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move prod.head atomically */
do {
/* Reset n to the initial burst count */
@@ -385,16 +381,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = (mask + cons_tail - prod_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : free_entries;
+
+ if (n == 0)
+ goto end;

prod_next = prod_head + n;
success = rte_atomic32_cmpset(&r->prod.head, prod_head,
@@ -413,6 +405,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -434,7 +429,8 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
@@ -450,16 +446,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = mask + cons_tail - prod_head;

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+
+ if (n == 0)
+ goto end;
+

prod_next = prod_head + n;
r->prod.head = prod_next;
@@ -469,6 +461,9 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -628,9 +623,10 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -647,9 +643,10 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -670,12 +667,12 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
}

/**
@@ -695,7 +692,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -712,7 +709,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -733,7 +730,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -979,9 +976,10 @@ struct rte_ring *rte_ring_lookup(const char *name);
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -998,9 +996,10 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -1021,12 +1020,12 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 1ac0aa8..0c6e04f 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -546,7 +546,8 @@ app_main_loop_rx_metadata(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 4e20669..c06ff54 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -97,7 +97,7 @@ app_main_loop_rx(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs, NULL);
} while (ret == 0);
}
}
@@ -130,7 +130,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
- app.burst_size_worker_write);
+ app.burst_size_worker_write,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 53caa3e..8df28b4 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -206,7 +206,8 @@ slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
static int
slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, size);
+ return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf,
+ size, NULL);
}

static uint16_t
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index af011f7..045a7f2 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -98,7 +98,7 @@ test_single_enqueue_dequeue(void)
const uint64_t sc_start = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
- rte_ring_enqueue_bulk(r, &burst, 1);
+ rte_ring_enqueue_bulk(r, &burst, 1, NULL);
rte_ring_dequeue_bulk(r, &burst, 1);
}
const uint64_t sc_end = rte_rdtsc_precise();
@@ -131,7 +131,8 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 112433b..b0ca88b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,11 +117,12 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
+ NULL) != 0);
TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
@@ -167,19 +168,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -213,19 +214,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -260,7 +261,7 @@ test_ring_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -290,13 +291,13 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
@@ -371,19 +372,19 @@ test_ring_burst_basic(void)

printf("Test SP & SC basic functions \n");
printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -419,7 +420,7 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
goto fail;
@@ -427,14 +428,14 @@ test_ring_burst_basic(void)
}

printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
/* Always one free entry left */
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -444,7 +445,7 @@ test_ring_burst_basic(void)
goto fail;

printf("Test enqueue for a full entry \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;

@@ -486,19 +487,19 @@ test_ring_burst_basic(void)
printf("Test MP & MC basic functions \n");

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -534,7 +535,7 @@ test_ring_burst_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -557,19 +558,19 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK objects */
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -607,7 +608,7 @@ test_ring_burst_basic(void)

printf("Covering rte_ring_enqueue_burst functions \n");

- ret = rte_ring_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
@@ -746,7 +747,7 @@ test_ring_basic_ex(void)
}

/* Covering the ring burst operation */
- ret = rte_ring_enqueue_burst(rp, obj, 2);
+ ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
if ((ret & RTE_RING_SZ_MASK) != 2) {
printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 8ccbdef..f95a8e9 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -323,14 +323,16 @@ test_burst_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
@@ -357,14 +359,16 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 2532367..395f4f3 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -80,7 +80,7 @@ test_port_ring_reader(void)
mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- mbuf, 1);
+ mbuf, 1, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

if (received_pkts < expected_pkts)
@@ -93,7 +93,7 @@ test_port_ring_reader(void)
mbuf[i] = rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
RTE_PORT_IN_BURST_SIZE_MAX);

diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 6e4dcd8..39e070c 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -380,7 +380,7 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
nb_pkts = 0;
else
nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increment opacket count */
dev_private->eth_stats.opackets += nb_pkts;
@@ -496,7 +496,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
vrtl_eth_dev->data->dev_private;

return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

int
--
2.9.3
Bruce Richardson
2017-03-07 11:32:12 UTC
Permalink
Add an extra parameter to the ring dequeue burst/bulk functions so that
those functions can optionally return the number of objects remaining in
the ring. This information can be used by applications in a number of
ways; for instance, with single-consumer queues it provides a maximum
dequeue size which is guaranteed to succeed.
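
A minimal usage sketch (not part of the patch), assuming a single-consumer
ring and a purely illustrative burst size of 32:

#include <rte_ring.h>

static void
drain_ring(struct rte_ring *r)
{
	void *objs[32];
	unsigned int avail, got, i;

	/* Dequeue up to 32 objects; 'avail' optionally reports how many
	 * objects remain in the ring after the call (pass NULL to ignore). */
	got = rte_ring_sc_dequeue_burst(r, objs, 32, &avail);
	for (i = 0; i < got; i++) {
		/* consume objs[i] */
	}

	/*
	 * With a single consumer nothing else dequeues behind our back, so
	 * a follow-up dequeue of up to 'avail' objects is guaranteed to
	 * succeed without a separate rte_ring_count() check.
	 */
	if (avail > 0) {
		got = rte_ring_sc_dequeue_burst(r, objs,
				avail > 32 ? 32 : avail, NULL);
		for (i = 0; i < got; i++) {
			/* consume objs[i] */
		}
	}
}

The enqueue side gained a symmetric free_space parameter in the previous
patch; callers not interested in either value simply pass NULL, as the call
sites converted in this series do.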

Signed-off-by: Bruce Richardson <***@intel.com>
---
app/pdump/main.c | 2 +-
doc/guides/rel_notes/release_17_05.rst | 8 ++
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 2 +-
examples/load_balancer/runtime.c | 6 +-
.../client_server_mp/mp_client/client.c | 3 +-
examples/packet_ordering/main.c | 6 +-
examples/qos_sched/app_thread.c | 6 +-
examples/quota_watermark/qw/main.c | 5 +-
examples/server_node_efd/node/node.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 3 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ring.c | 6 +-
lib/librte_ring/rte_ring.h | 90 +++++++++++-----------
test/test-pipeline/runtime.c | 6 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 7 +-
test/test/test_ring.c | 54 ++++++-------
test/test/test_ring_perf.c | 20 +++--
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 8 +-
test/test/virtual_pmd.c | 4 +-
26 files changed, 145 insertions(+), 114 deletions(-)

diff --git a/app/pdump/main.c b/app/pdump/main.c
index b88090d..3b13753 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -496,7 +496,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)

/* first dequeue packets from ring of primary process */
const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
- (void *)rxtx_bufs, BURST_SIZE);
+ (void *)rxtx_bufs, BURST_SIZE, NULL);
stats->dequeue_pkts += nb_in_deq;

if (nb_in_deq) {
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 249ad6e..563a74c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -123,6 +123,8 @@ API Changes
* added an extra parameter to the burst/bulk enqueue functions to
return the number of free spaces in the ring after enqueue. This can
be used by an application to implement its own watermark functionality.
+ * added an extra parameter to the burst/bulk dequeue functions to return
+ the number of elements remaining in the ring after dequeue.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
@@ -135,6 +137,12 @@ API Changes
- ``rte_ring_sc_dequeue_bulk``
- ``rte_ring_dequeue_bulk``

+ NOTE: the above functions all have different parameters as well as
+ different return values, due to the other listed changes above. This
+ means that all instances of the functions in existing code will be
+ flagged by the compiler. The return value usage should be checked
+ while fixing the compiler errors caused by the extra parameter.
+
ABI Changes
-----------

diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index ed5a9fc..f68ec8d 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -155,7 +155,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index f3ac9e2..96638af 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1008,7 +1008,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct port *port = &mode_8023ad_ports[slaves[i]];

slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];

for (j = 0; j < slave_slow_nb_pkts[i]; j++)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index adbf478..77ef3a1 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -88,7 +88,7 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index cfd360b..5cb6185 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -330,7 +330,7 @@ lcore_tx(struct rte_ring *in_r)

struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE);
+ (void *)bufs, BURST_SIZE, NULL);
app_stats.tx.dequeue_pkts += nb_rx;

/* if we get no traffic, flush anything we have */
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 1645994..8192c08 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -349,7 +349,8 @@ app_lcore_io_tx(
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
@@ -504,7 +505,8 @@ app_lcore_worker(
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index dca9eb9..01b535c 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -279,7 +279,8 @@ main(int argc, char *argv[])
uint16_t i, rx_pkts;
uint8_t port;

- rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index d268350..7719dad 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)

/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;

@@ -510,7 +510,7 @@ send_thread(struct send_thread_args *args)

/* deque the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(nb_dq_mbufs == 0))
continue;
@@ -595,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)

/* deque the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(dqnum == 0))
continue;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 0c81a15..15f117f 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -179,7 +179,7 @@ app_tx_thread(struct thread_conf **confs)

while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
+ burst_conf.qos_dequeue, NULL);
if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

@@ -218,7 +218,7 @@ app_worker_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
@@ -254,7 +254,7 @@ app_mixed_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 57df8ef..2dcddea 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -247,7 +247,8 @@ pipeline_stage(__attribute__((unused)) void *args)
}

/* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
if (unlikely(nb_dq_pkts < 0))
continue;

@@ -305,7 +306,7 @@ send_stage(__attribute__((unused)) void *args)

/* Dequeue packets from tx and send them */
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
- (void *) tx_pkts, *quota);
+ (void *) tx_pkts, *quota, NULL);
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);

/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 9ec6a05..f780b92 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) == 0))
+ rx_pkts, NULL) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6552199..645c0cf 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -536,7 +536,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
- cached_free_slots->objs, LCORE_CACHE_SIZE);
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE, NULL);
if (n_slots == 0)
return -ENOSPC;

diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 9b8fd2b..5c132bf 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -58,14 +58,14 @@ static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_mc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_sc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_port/rte_port_frag.c b/lib/librte_port/rte_port_frag.c
index 0fcace9..320407e 100644
--- a/lib/librte_port/rte_port_frag.c
+++ b/lib/librte_port/rte_port_frag.c
@@ -186,7 +186,8 @@ rte_port_ring_reader_frag_rx(void *port,
/* If "pkts" buffer is empty, read packet burst from ring */
if (p->n_pkts == 0) {
p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX,
+ NULL);
RTE_PORT_RING_READER_FRAG_STATS_PKTS_IN_ADD(p, p->n_pkts);
if (p->n_pkts == 0)
return n_pkts_out;
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 9fadac7..492b0e7 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -111,7 +111,8 @@ rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
@@ -124,7 +125,8 @@ rte_port_ring_multi_reader_rx(void *port, struct rte_mbuf **pkts,
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 73b1c26..ca25dd7 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -491,7 +491,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -500,11 +501,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
@@ -519,15 +515,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = (prod_tail - cons_head);

/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;

cons_next = cons_head + n;
success = rte_atomic32_cmpset(&r->cons.head, cons_head,
@@ -546,7 +538,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_pause();

r->cons.tail = cons_next;
-
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -570,7 +564,8 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -585,15 +580,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* and size(ring)-1. */
entries = prod_tail - cons_head;

- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;

cons_next = cons_head + n;
r->cons.head = cons_next;
@@ -603,6 +594,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -749,9 +743,11 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -768,9 +764,11 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -790,12 +788,13 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
}

/**
@@ -816,7 +815,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -834,7 +833,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -856,7 +855,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -1046,9 +1045,11 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1066,9 +1067,11 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1088,12 +1091,13 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
}

#ifdef __cplusplus
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index c06ff54..8970e1c 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -121,7 +121,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_rx[i],
(void **) worker_mbuf->array,
- app.burst_size_worker_read);
+ app.burst_size_worker_read,
+ NULL);

if (ret == 0)
continue;
@@ -151,7 +152,8 @@ app_main_loop_tx(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_tx[i],
(void **) &app.mbuf_tx[i].array[n_mbufs],
- app.burst_size_tx_read);
+ app.burst_size_tx_read,
+ NULL);

if (ret == 0)
continue;
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 8df28b4..15091b1 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -193,7 +193,8 @@ static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, };
static int
slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, size);
+ return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf,
+ size, NULL);
}

/*
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index 045a7f2..004882a 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -67,7 +67,7 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t eth_start = rte_rdtsc();
@@ -99,7 +99,7 @@ test_single_enqueue_dequeue(void)
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
rte_ring_enqueue_bulk(r, &burst, 1, NULL);
- rte_ring_dequeue_bulk(r, &burst, 1);
+ rte_ring_dequeue_bulk(r, &burst, 1, NULL);
}
const uint64_t sc_end = rte_rdtsc_precise();
rte_compiler_barrier();
@@ -133,7 +133,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, (void *)burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index b0ca88b..858ebc1 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -119,7 +119,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
__func__, i, rand);
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
NULL) != 0);
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand,
+ NULL) == rand);

/* fill the ring */
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
@@ -129,7 +130,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz,
+ NULL) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -186,19 +188,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -232,19 +234,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -265,7 +267,7 @@ test_ring_basic(void)
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -303,13 +305,13 @@ test_ring_basic(void)
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue2\n");
@@ -390,19 +392,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -451,19 +453,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK entries */
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -505,19 +507,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -539,7 +541,7 @@ test_ring_burst_basic(void)
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -578,19 +580,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available objects - the exact MAX_BULK */
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -613,7 +615,7 @@ test_ring_burst_basic(void)
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret != 2)
goto fail;
@@ -753,7 +755,7 @@ test_ring_basic_ex(void)
goto fail_test;
}

- ret = rte_ring_dequeue_burst(rp, obj, 2);
+ ret = rte_ring_dequeue_burst(rp, obj, 2, NULL);
if (ret != 2) {
printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index f95a8e9..ed89896 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -152,12 +152,12 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t mc_end = rte_rdtsc();

printf("SC empty dequeue: %.2F\n",
@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();

@@ -325,7 +325,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -333,7 +334,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

@@ -361,7 +363,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -369,7 +372,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

diff --git a/test/test/test_table_acl.c b/test/test/test_table_acl.c
index b3bfda4..4d43be7 100644
--- a/test/test/test_table_acl.c
+++ b/test/test/test_table_acl.c
@@ -713,7 +713,7 @@ test_pipeline_single_filter(int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0) {
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/test/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 36bfeda..b58aa5d 100644
--- a/test/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -494,7 +494,7 @@ test_pipeline_single_filter(int test_type, int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0)
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 395f4f3..39592ce 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -163,7 +163,7 @@ test_port_ring_writer(void)
rte_port_ring_writer_ops.f_flush(port);
expected_pkts = 1;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -7;
@@ -178,7 +178,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -193,7 +193,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -208,7 +208,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -9;
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 39e070c..b209355 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -342,7 +342,7 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
dev_private = vrtl_eth_dev->data->dev_private;

rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increments ipackets count */
dev_private->eth_stats.ipackets += rx_count;
@@ -508,7 +508,7 @@ virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,

dev_private = vrtl_eth_dev->data->dev_private;
return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

static uint8_t
--
2.9.3
Bruce Richardson
2017-03-07 11:32:13 UTC
Permalink
Now that the enqueue function returns the amount of space in the ring,
we can use that to replace the old watermark functionality. Update the
example app to do so, and re-enable it in the examples Makefile.
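
For reference, the resulting pattern in an application looks roughly like
this (an illustrative sketch only, not code from the patch; RING_SIZE,
high_watermark and send_pause_frame() follow the naming used in the
quota_watermark example):

	unsigned int free_count, i;

	/* enqueue now returns 0 or nb_rx_pkts, and reports the free space
	 * left in the ring after the operation through &free_count */
	if (rte_ring_enqueue_bulk(ring, (void **)pkts, nb_rx_pkts,
			&free_count) == 0) {
		/* nothing was enqueued, give the mbufs back to the pool */
		for (i = 0; i < nb_rx_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
	}

	/* ring occupancy is RING_SIZE - free_count, so the old -EDQUOT
	 * signal can be recomputed by the application itself */
	if (RING_SIZE - free_count > *high_watermark)
		send_pause_frame(port_id, 1337);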

Signed-off-by: Bruce Richardson <***@intel.com>
---
examples/Makefile | 2 +-
examples/quota_watermark/qw/init.c | 5 +++--
examples/quota_watermark/qw/main.c | 16 ++++++++++------
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 ++--
examples/quota_watermark/qwctl/qwctl.c | 2 ++
examples/quota_watermark/qwctl/qwctl.h | 1 +
7 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/examples/Makefile b/examples/Makefile
index 19cd5ad..da2bfdd 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-#DIRS-y += quota_watermark
+DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 95a9f94..6babfea 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -140,7 +140,7 @@ void init_ring(int lcore_id, uint8_t port_id)
if (ring == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

- rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);
+ *high_watermark = 80 * RING_SIZE / 100;

rings[lcore_id][port_id] = ring;
}
@@ -168,10 +168,11 @@ setup_shared_variables(void)
const struct rte_memzone *qw_memzone;

qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
- 2 * sizeof(int), rte_socket_id(), 0);
+ 3 * sizeof(int), rte_socket_id(), 0);
if (qw_memzone == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 2dcddea..bdb8a43 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -67,6 +67,7 @@ struct ether_fc_frame {

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;

uint8_t port_pairs[RTE_MAX_ETHPORTS];

@@ -158,6 +159,7 @@ receive_stage(__attribute__((unused)) void *args)
uint16_t nb_rx_pkts;

unsigned int lcore_id;
+ unsigned int free;

struct rte_mbuf *pkts[MAX_PKT_QUOTA];
struct rte_ring *ring;
@@ -189,13 +191,13 @@ receive_stage(__attribute__((unused)) void *args)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
(uint16_t) *quota);
ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
- nb_rx_pkts);
- if (ret == -EDQUOT) {
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
ring_state[port_id] = RING_OVERLOADED;
send_pause_frame(port_id, 1337);
}

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
@@ -217,6 +219,7 @@ pipeline_stage(__attribute__((unused)) void *args)
uint8_t port_id;

unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;

void *pkts[MAX_PKT_QUOTA];
struct rte_ring *rx, *tx;
@@ -253,11 +256,12 @@ pipeline_stage(__attribute__((unused)) void *args)
continue;

/* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
ring_state[port_id] = RING_OVERLOADED;

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 545ba42..8c8e311 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -43,6 +43,7 @@ enum ring_state {

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

extern uint8_t port_pairs[RTE_MAX_ETHPORTS];

diff --git a/examples/quota_watermark/qwctl/commands.c b/examples/quota_watermark/qwctl/commands.c
index 036bf80..5cac0e1 100644
--- a/examples/quota_watermark/qwctl/commands.c
+++ b/examples/quota_watermark/qwctl/commands.c
@@ -140,8 +140,8 @@ cmd_set_handler(__attribute__((unused)) void *parsed_result,
else
if (tokens->value >= *low_watermark * 100 / RING_SIZE
&& tokens->value <= 100)
- rte_ring_set_water_mark(ring,
- tokens->value * RING_SIZE / 100);
+ *high_watermark = tokens->value *
+ RING_SIZE / 100;
else
cmdline_printf(cl,
"ring high watermark must be between %u%% and 100%%\n",
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index 3a85cc3..7e7a396 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -55,6 +55,7 @@

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;


static void
@@ -68,6 +69,7 @@ setup_shared_variables(void)

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}

int main(int argc, char **argv)
diff --git a/examples/quota_watermark/qwctl/qwctl.h b/examples/quota_watermark/qwctl/qwctl.h
index 8d146e5..545914b 100644
--- a/examples/quota_watermark/qwctl/qwctl.h
+++ b/examples/quota_watermark/qwctl/qwctl.h
@@ -36,5 +36,6 @@

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

#endif /* _MAIN_H_ */
--
2.9.3
Bruce Richardson
2017-03-07 11:32:14 UTC
Permalink
The local variable i is only used for loop control, so define it in
the enqueue and dequeue blocks directly, rather than at the function
level.

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index ca25dd7..4e5219a 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -288,6 +288,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
+ unsigned int i; \
const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
@@ -314,6 +315,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
+ unsigned int i; \
uint32_t idx = cons_head & mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
@@ -364,7 +366,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned int max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move prod.head atomically */
@@ -434,7 +435,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned int i;
uint32_t mask = r->mask;

prod_head = r->prod.head;
@@ -498,7 +498,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move cons.head atomically */
@@ -569,7 +568,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-03-07 11:32:15 UTC
Permalink
We can write a single common function for head manipulation for enq
and a common one for deq, allowing us to have a single worker function
for each of enq and deq, rather than two of each. Update all other
inline functions to use the new functions.
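
In outline, the enqueue side then follows a single three-phase shape, which
the dequeue side mirrors with cons.head/cons.tail and a read barrier (a
simplified sketch of the flow, not the literal code from the diff below):

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries); /* reserve */
	if (n != 0) {
		ENQUEUE_PTRS();				/* copy objects in */
		rte_smp_wmb();
		/* wait for earlier producers, then publish our slots */
		while (unlikely(r->prod.tail != prod_head))
			rte_pause();
		r->prod.tail = prod_next;
	}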

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 18fb644..4776079 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,8 +138,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
- r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
+ r->prod.sp_enqueue = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
+ r->cons.sc_dequeue = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
r->size = count;
r->mask = count - 1;
r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 4e5219a..d2ebc9d 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -172,6 +172,12 @@ struct rte_ring {
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

+/* @internal defines for passing to the enqueue dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
+
/**
* Calculate the memory size needed for a ring
*
@@ -290,7 +296,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
#define ENQUEUE_PTRS() do { \
unsigned int i; \
const uint32_t size = r->size; \
- uint32_t idx = prod_head & mask; \
+ uint32_t idx = prod_head & r->mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
r->ring[idx] = obj_table[i]; \
@@ -316,7 +322,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
unsigned int i; \
- uint32_t idx = cons_head & mask; \
+ uint32_t idx = cons_head & r->mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
@@ -339,83 +345,72 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} while (0)

/**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
* @param n
- * The number of objects to add in the ring from the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned int max = n;
+ const uint32_t mask = r->mask;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

- /* move prod.head atomically */
do {
/* Reset n to the initial burst count */
n = max;

- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
+ *old_head = r->prod.head;
+ const uint32_t cons_tail = r->cons.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
+ * *old_head > cons_tail). So 'free_entries' is always between 0
* and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
+ *free_entries = (mask + cons_tail - *old_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries))
+ if (unlikely(n > *free_entries))
n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : free_entries;
+ 0 : *free_entries;

if (n == 0)
- goto end;
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
} while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-end:
- if (free_space != NULL)
- *free_space = free_entries - n;
return n;
}

/**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
*
- * @param r
+ * @param r
* A pointer to the ring structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
@@ -423,44 +418,40 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;

+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;

-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
r->prod.tail = prod_next;
+
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -468,130 +459,112 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
}

/**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of elements we will want to dequeue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
n = max;

- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
+ *old_head = r->cons.head;
+ const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- entries = (prod_tail - cons_head);
+ *entries = (prod_tail - *old_head);

/* Set the actual entries for dequeue */
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

if (unlikely(n == 0))
- goto end;
-
- cons_next = cons_head + n;
- success = rte_atomic32_cmpset(&r->cons.head, cons_head,
- cons_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
} while (unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
- rte_smp_rmb();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
-end:
- if (available != NULL)
- *available = entries - n;
return n;
}

/**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
*
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of void * pointers (objects).
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of objects to pull from the ring.
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+ int is_mp, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- uint32_t mask = r->mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
-
- if (unlikely(entries == 0))
- goto end;
+ uint32_t cons_head, cons_next;
+ uint32_t entries;

- cons_next = cons_head + n;
- r->cons.head = cons_next;
+ n = __rte_ring_move_cons_head(r, is_mp, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;

- /* copy in table */
DEQUEUE_PTRS();
rte_smp_rmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->cons.tail != cons_head))
+ rte_pause();
+
r->cons.tail = cons_next;
+
end:
if (available != NULL)
*available = entries - n;
@@ -617,8 +590,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MP, free_space);
}

/**
@@ -637,8 +610,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SP, free_space);
}

/**
@@ -661,10 +634,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->prod.sp_enqueue, free_space);
}

/**
@@ -744,8 +715,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MC, available);
}

/**
@@ -765,8 +736,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SC, available);
}

/**
@@ -789,10 +760,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->cons.sc_dequeue, available);
}

/**
@@ -975,8 +944,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}

/**
@@ -995,8 +964,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}

/**
@@ -1019,10 +988,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+ r->prod.sp_enqueue, free_space);
}

/**
@@ -1046,8 +1013,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}

/**
@@ -1068,8 +1035,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}

/**
@@ -1092,10 +1059,9 @@ static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE,
+ r->cons.sc_dequeue, available);
}

#ifdef __cplusplus
--
2.9.3
Bruce Richardson
2017-03-07 11:32:16 UTC
Permalink
Both producer and consumer use the same logic for updating the tail
index, so merge it into a single function.
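
To see why the shared helper has to spin on the tail value first, consider
two lcores enqueuing concurrently (a hypothetical timeline, for illustration
only):

	/*
	 * lcore A reserves slots 0-7:   prod.head moves 0 -> 8
	 * lcore B reserves slots 8-15:  prod.head moves 8 -> 16
	 * lcore B finishes copying first, calls update_tail(&r->prod, 8, 16)
	 *   and spins, because prod.tail is still 0, not 8.
	 * lcore A finishes copying, calls update_tail(&r->prod, 0, 8);
	 *   prod.tail becomes 8 and consumers may now read slots 0-7.
	 * lcore B sees prod.tail == 8, stops spinning and sets prod.tail = 16.
	 *
	 * The consumer side behaves identically with cons.head/cons.tail,
	 * which is why one helper can serve both.
	 */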

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index d2ebc9d..6499a8d 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -344,6 +344,19 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)

+static inline __attribute__((always_inline)) void
+update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val)
+{
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ ht->tail = new_val;
+}
+
/**
* @internal This function updates the producer head for enqueue
*
@@ -443,15 +456,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-
+ update_tail(&r->prod, prod_head, prod_next);
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -556,14 +561,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
+ update_tail(&r->cons, cons_head, cons_next);

end:
if (available != NULL)
--
2.9.3
Bruce Richardson
2017-03-07 11:32:17 UTC
Permalink
Modify the enqueue and dequeue macros to support copying any type of
object by passing in the exact object type. Rather than using the "ring"
structure member of rte_ring, which is of type "array of void *", instead
have the macros take the start of the ring as a pointer value, thereby
leaving the rte_ring structure as purely a header. This allows it to be
reused by other future ring types, which can add extra fields if they
want, or even have the actual ring elements, of whatever type, stored
separately from the ring header.
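
As a hypothetical illustration of the reuse this enables (not part of this
patch; the struct and variable names below are invented for the example), a
ring of 16-byte event elements could keep the rte_ring structure purely as a
header and pass its own element type into the macros:

	struct my_event {
		uint64_t meta;
		void *data;
	};

	struct my_event_ring {
		struct rte_ring hdr;	 /* head/tail metadata only */
		struct my_event elems[]; /* element storage of the new type */
	};

	/* inside an enqueue worker, with prod_head, n and an obj_table of
	 * struct my_event entries in scope, as in __rte_ring_do_enqueue(): */
	ENQUEUE_PTRS(&er->hdr, er->elems, prod_head, obj_table, n,
			struct my_event);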

Signed-off-by: Bruce Richardson <***@intel.com>
---
lib/librte_ring/rte_ring.h | 68 ++++++++++++++++++++++++----------------------
1 file changed, 36 insertions(+), 32 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 6499a8d..d329476 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -162,11 +162,7 @@ struct rte_ring {

/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
-
- void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
- * not volatile so need to be careful
- * about compiler re-ordering */
-};
+} __rte_cache_aligned;

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
@@ -293,54 +289,62 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
-#define ENQUEUE_PTRS() do { \
+#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
unsigned int i; \
- const uint32_t size = r->size; \
- uint32_t idx = prod_head & r->mask; \
+ const uint32_t size = (r)->size; \
+ uint32_t idx = prod_head & (r)->mask; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx+1] = obj_table[i+1]; \
- r->ring[idx+2] = obj_table[i+2]; \
- r->ring[idx+3] = obj_table[i+3]; \
+ ring[idx] = obj_table[i]; \
+ ring[idx+1] = obj_table[i+1]; \
+ ring[idx+2] = obj_table[i+2]; \
+ ring[idx+3] = obj_table[i+3]; \
} \
switch (n & 0x3) { \
- case 3: r->ring[idx++] = obj_table[i++]; \
- case 2: r->ring[idx++] = obj_table[i++]; \
- case 1: r->ring[idx++] = obj_table[i++]; \
+ case 3: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ ring[idx++] = obj_table[i++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
} \
-} while(0)
+} while (0)

/* the actual copy of pointers on the ring to obj_table.
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
-#define DEQUEUE_PTRS() do { \
+#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
unsigned int i; \
- uint32_t idx = cons_head & r->mask; \
- const uint32_t size = r->size; \
+ uint32_t idx = cons_head & (r)->mask; \
+ const uint32_t size = (r)->size; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i+1] = r->ring[idx+1]; \
- obj_table[i+2] = r->ring[idx+2]; \
- obj_table[i+3] = r->ring[idx+3]; \
+ obj_table[i] = ring[idx]; \
+ obj_table[i+1] = ring[idx+1]; \
+ obj_table[i+2] = ring[idx+2]; \
+ obj_table[i+3] = ring[idx+3]; \
} \
switch (n & 0x3) { \
- case 3: obj_table[i++] = r->ring[idx++]; \
- case 2: obj_table[i++] = r->ring[idx++]; \
- case 1: obj_table[i++] = r->ring[idx++]; \
+ case 3: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = ring[idx++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
} \
} while (0)

@@ -453,7 +457,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
if (n == 0)
goto end;

- ENQUEUE_PTRS();
+ ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
rte_smp_wmb();

update_tail(&r->prod, prod_head, prod_next);
@@ -558,7 +562,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
if (n == 0)
goto end;

- DEQUEUE_PTRS();
+ DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
rte_smp_rmb();

update_tail(&r->cons, cons_head, cons_next);
--
2.9.3
Olivier Matz
2017-03-14 08:59:04 UTC
Permalink
Post by Bruce Richardson
NOTE: this set depends on the v2 cleanup set sent previously.
http://dpdk.org/ml/archives/dev/2017-February/thread.html#58200
This patchset makes a set of, sometimes non-backward compatible, cleanup
changes to the rte_ring code in order to improve it. The resulting code is
shorter, since the existing functions are restructured to reduce code
duplication, as well as being more consistent in behaviour. The specific
changes made are explained in each patch which makes that change.
* Eliminated extra cacheline padding where cachelines are 128B
* Renamed rte_ring_ht_ptr struct to rte_ring_headtail
* Removed missed references to ring watermarks in test code and docs
This patchset is largely the same as that posted previously on-list as
http://dpdk.org/ml/archives/dev/2017-February/thread.html#56982
* Included release notes updates as changes are made in each patch
* Fixed some missed comment updates when changing the code
* Separated some initial fixup patches from this set to send separately
* Dropped the final two patches for an rte_event_ring, as not relevant
for this set. That can be done as a separate set later.
* The macros for copying the pointers have an extra parameter added,
indicating the start of the ring buffer itself. This allows more
flexibility for reusing them in other ring implementations.
ring: remove split cacheline build setting
ring: create common structure for prod and cons metadata
ring: eliminate duplication of size and mask fields
ring: remove debug setting
ring: remove the yield when waiting for tail update
ring: remove watermark support
ring: make bulk and burst fn return vals consistent
ring: allow enqueue fns to return free space value
ring: allow dequeue fns to return remaining entry count
examples/quota_watermark: use ring space for watermarks
ring: reduce scope of local variables
ring: separate out head index manipulation for enq/deq
ring: create common function for updating tail idx
ring: make ring struct and enq/deq macros type agnostic
app/pdump/main.c | 2 +-
config/common_base | 3 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 -
doc/guides/prog_guide/ring_lib.rst | 15 -
doc/guides/rel_notes/release_17_05.rst | 32 +
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 4 +-
examples/distributor/main.c | 5 +-
examples/load_balancer/runtime.c | 34 +-
.../client_server_mp/mp_client/client.c | 9 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 13 +-
examples/qos_sched/app_thread.c | 14 +-
examples/quota_watermark/qw/init.c | 5 +-
examples/quota_watermark/qw/main.c | 21 +-
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 +-
examples/quota_watermark/qwctl/qwctl.c | 2 +
examples/quota_watermark/qwctl/qwctl.h | 1 +
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 5 +-
lib/librte_mempool/rte_mempool_ring.c | 12 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 34 +-
lib/librte_ring/rte_ring.c | 76 +--
lib/librte_ring/rte_ring.h | 760 ++++++++-------------
test/test-pipeline/pipeline_hash.c | 5 +-
test/test-pipeline/runtime.c | 19 +-
test/test/autotest_test_funcs.py | 7 -
test/test/commands.c | 52 --
test/test/test_link_bonding_mode4.c | 6 +-
test/test/test_pmd_ring_perf.c | 12 +-
test/test/test_ring.c | 704 +++----------------
test/test/test_ring_perf.c | 36 +-
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 12 +-
test/test/virtual_pmd.c | 8 +-
43 files changed, 554 insertions(+), 1388 deletions(-)
Series
Acked-by: Olivier Matz <***@6wind.com>

Thanks!
Bruce Richardson
2017-03-24 17:09:54 UTC
Permalink
This patchset makes a set of, sometimes non-backward compatible, cleanup
changes to the rte_ring code in order to improve it. The resulting code is
shorter, since the existing functions are restructured to reduce code
duplication, as well as being more consistent in behaviour. The specific
changes made are explained in each patch which makes that change.

Changes in V3:
* removed anonymous union for sp_enqueue and sc_dequeue variables (p2)
* fixed parameter to dequeue function which was mistakenly named is_mp
instead of is_sc (as described in the comments) (p12)
* skipped unneeded check in tail pointer update fn for sp/sc operation (p13)

Changes in V2:
* Eliminated extra cacheline padding where cachelines are 128B
* Renamed rte_ring_ht_ptr struct to rte_ring_headtail
* Removed missed references to ring watermarks in test code and docs

This patchset is largely the same as that posted previously on-list as
an RFC:
http://dpdk.org/ml/archives/dev/2017-February/thread.html#56982

Changes in V1 from RFC:
* Included release notes updates as changes are made in each patch
* Fixed some missed comment updates when changing the code
* Separated some initial fixup patches from this set to send separately
* Dropped the final two patches for an rte_event_ring, as not relevant
for this set. That can be done as a separate set later.
* The macros for copying the pointers have an extra parameter added,
indicating the start of the ring buffer itself. This allows more
flexibility for reusing them in other ring implementations.

Bruce Richardson (14):
ring: remove split cacheline build setting
ring: create common structure for prod and cons metadata
ring: eliminate duplication of size and mask fields
ring: remove debug setting
ring: remove the yield when waiting for tail update
ring: remove watermark support
ring: make bulk and burst fn return vals consistent
ring: allow enqueue fns to return free space value
ring: allow dequeue fns to return remaining entry count
examples/quota_watermark: use ring space for watermarks
ring: reduce scope of local variables
ring: separate out head index manipulation for enq/deq
ring: create common function for updating tail idx
ring: make ring struct and enq/deq macros type agnostic

app/pdump/main.c | 2 +-
config/common_base | 3 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 -
doc/guides/prog_guide/ring_lib.rst | 15 -
doc/guides/rel_notes/release_17_05.rst | 33 +
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 4 +-
examples/distributor/main.c | 5 +-
examples/load_balancer/runtime.c | 34 +-
.../client_server_mp/mp_client/client.c | 9 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 13 +-
examples/qos_sched/app_thread.c | 14 +-
examples/quota_watermark/qw/init.c | 5 +-
examples/quota_watermark/qw/main.c | 21 +-
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 +-
examples/quota_watermark/qwctl/qwctl.c | 2 +
examples/quota_watermark/qwctl/qwctl.h | 1 +
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 5 +-
lib/librte_mempool/rte_mempool_ring.c | 12 +-
lib/librte_pdump/rte_pdump.c | 4 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 46 +-
lib/librte_ring/rte_ring.c | 76 +--
lib/librte_ring/rte_ring.h | 759 ++++++++-------------
test/test-pipeline/pipeline_hash.c | 5 +-
test/test-pipeline/runtime.c | 19 +-
test/test/autotest_test_funcs.py | 7 -
test/test/commands.c | 52 --
test/test/test_link_bonding_mode4.c | 6 +-
test/test/test_pmd_ring_perf.c | 12 +-
test/test/test_ring.c | 704 +++----------------
test/test/test_ring_perf.c | 36 +-
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 12 +-
test/test/virtual_pmd.c | 8 +-
43 files changed, 561 insertions(+), 1395 deletions(-)
--
2.9.3
Bruce Richardson
2017-03-24 17:09:55 UTC
Permalink
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. On platforms with 64B
cachelines, for improved performance use 128B rather than 64B alignment
since it stops the producer and consumer data being on adjacent cachelines.
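
The effect is easy to check (illustrative only, not part of the patch): with
64B cachelines the producer and consumer metadata each start on a 128B
boundary, so a core touching one of them cannot drag the other into its
cache via the adjacent-cacheline prefetcher.

	#include <stddef.h>
	#include <stdio.h>
	#include <rte_ring.h>

	int main(void)
	{
		printf("prod offset = %zu, cons offset = %zu\n",
			offsetof(struct rte_ring, prod),
			offsetof(struct rte_ring, cons));
		/* expected on a typical 64B-cacheline build: 128 and 256 */
		return 0;
	}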

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V2: Limit the cacheline * 2 alignment to platforms with < 128B line size
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 7 +++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 16 ++++++++++------
4 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/config/common_base b/config/common_base
index 37aa1e1..c394651 100644
--- a/config/common_base
+++ b/config/common_base
@@ -453,7 +453,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 918f483..57ae8bf 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -120,6 +120,13 @@ API Changes
* The LPM ``next_hop`` field is extended from 8 bits to 21 bits for IPv6
while keeping ABI compatibility.

+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+ have been made to it:
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+
ABI Changes
-----------

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..399ae3b 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -139,6 +139,14 @@ struct rte_ring_debug_stats {

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

+#if RTE_CACHE_LINE_SIZE < 128
+#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#else
+#define PROD_ALIGN RTE_CACHE_LINE_SIZE
+#define CONS_ALIGN RTE_CACHE_LINE_SIZE
+#endif
+
/**
* An RTE ring structure.
*
@@ -168,7 +176,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(PROD_ALIGN);

/** Ring consumer status. */
struct cons {
@@ -177,11 +185,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Consumer head. */
volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
+ } cons __rte_aligned(CONS_ALIGN);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
Bruce Richardson
2017-03-24 17:09:56 UTC
Permalink
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.

Signed-off-by: Bruce Richardson <***@intel.com>
---
V3: removed union and replaced with "single" variable
V2: renamed the shared structure based on maintainer feedback.
---
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ring.c | 12 ++++++------
lib/librte_ring/rte_ring.c | 4 ++--
lib/librte_ring/rte_ring.h | 41 +++++++++++++++++++----------------------
4 files changed, 28 insertions(+), 31 deletions(-)

diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index a580a6a..cc0b5b1 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -740,7 +740,7 @@ pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
rte_errno = EINVAL;
return -1;
}
- if (ring->prod.sp_enqueue || ring->cons.sc_dequeue) {
+ if (ring->prod.single || ring->cons.single) {
RTE_LOG(ERR, PDUMP, "ring with either SP or SC settings"
" is not valid for pdump, should have MP and MC settings\n");
rte_errno = EINVAL;
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 3b9d3d0..0df1bcf 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -73,8 +73,8 @@ rte_port_ring_reader_create_internal(void *params, int socket_id,
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->cons.sc_dequeue && is_multi) ||
- (!(conf->ring->cons.sc_dequeue) && !is_multi)) {
+ (conf->ring->cons.single && is_multi) ||
+ (!(conf->ring->cons.single) && !is_multi)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
}
@@ -198,8 +198,8 @@ rte_port_ring_writer_create_internal(void *params, int socket_id,
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->prod.sp_enqueue && is_multi) ||
- (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
+ (conf->ring->prod.single && is_multi) ||
+ (!(conf->ring->prod.single) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
@@ -467,8 +467,8 @@ rte_port_ring_writer_nodrop_create_internal(void *params, int socket_id,
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->prod.sp_enqueue && is_multi) ||
- (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
+ (conf->ring->prod.single && is_multi) ||
+ (!(conf->ring->prod.single) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 4bc6da1..93a8692 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -145,8 +145,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
return -ENAMETOOLONG;
r->flags = flags;
r->prod.watermark = count;
- r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
- r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
+ r->prod.single = !!(flags & RING_F_SP_ENQ);
+ r->cons.single = !!(flags & RING_F_SC_DEQ);
r->prod.size = r->cons.size = count;
r->prod.mask = r->cons.mask = count-1;
r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 399ae3b..331c94f 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,6 +147,16 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif

+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_headtail {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t single; /**< True if single prod/cons */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */
+};
+
/**
* An RTE ring structure.
*
@@ -169,23 +179,10 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */

/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_aligned(PROD_ALIGN);
+ struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);

/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
- } cons __rte_aligned(CONS_ALIGN);
+ struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
@@ -837,7 +834,7 @@ static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue_bulk(r, obj_table, n);
else
return rte_ring_mp_enqueue_bulk(r, obj_table, n);
@@ -904,7 +901,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue(r, obj);
else
return rte_ring_mp_enqueue(r, obj);
@@ -975,7 +972,7 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue_bulk(r, obj_table, n);
else
return rte_ring_mc_dequeue_bulk(r, obj_table, n);
@@ -1039,7 +1036,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue(r, obj_p);
else
return rte_ring_mc_dequeue(r, obj_p);
@@ -1206,7 +1203,7 @@ static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue_burst(r, obj_table, n);
else
return rte_ring_mp_enqueue_burst(r, obj_table, n);
@@ -1274,7 +1271,7 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue_burst(r, obj_table, n);
else
return rte_ring_mc_dequeue_burst(r, obj_table, n);
--
2.9.3
Olivier Matz
2017-03-27 07:20:45 UTC
Permalink
Post by Bruce Richardson
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.
Acked-by: Olivier Matz <***@6wind.com>
Bruce Richardson
2017-03-24 17:09:57 UTC
Permalink
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of that into the top level
structure so they are not duplicated.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
lib/librte_ring/rte_ring.c | 20 ++++++++++----------
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
test/test/test_ring.c | 6 +++---
3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 93a8692..93485d4 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -144,11 +144,11 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.watermark = count;
+ r->watermark = count;
r->prod.single = !!(flags & RING_F_SP_ENQ);
r->cons.single = !!(flags & RING_F_SC_DEQ);
- r->prod.size = r->cons.size = count;
- r->prod.mask = r->cons.mask = count-1;
+ r->size = count;
+ r->mask = count - 1;
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;

@@ -269,14 +269,14 @@ rte_ring_free(struct rte_ring *r)
int
rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
{
- if (count >= r->prod.size)
+ if (count >= r->size)
return -EINVAL;

/* if count is 0, disable the watermarking */
if (count == 0)
- count = r->prod.size;
+ count = r->size;

- r->prod.watermark = count;
+ r->watermark = count;
return 0;
}

@@ -291,17 +291,17 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)

fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->prod.watermark == r->prod.size)
+ if (r->watermark == r->size)
fprintf(f, " watermark=0\n");
else
- fprintf(f, " watermark=%"PRIu32"\n", r->prod.watermark);
+ fprintf(f, " watermark=%"PRIu32"\n", r->watermark);

/* sum and dump statistics */
#ifdef RTE_LIBRTE_RING_DEBUG
@@ -318,7 +318,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
}
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 331c94f..d650215 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -151,10 +151,7 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
struct rte_ring_headtail {
volatile uint32_t head; /**< Prod/consumer head. */
volatile uint32_t tail; /**< Prod/consumer tail. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
uint32_t single; /**< True if single prod/cons */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */
};

/**
@@ -174,9 +171,12 @@ struct rte_ring {
* next time the ABI changes
*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -355,7 +355,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
+ const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
@@ -382,7 +382,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+ const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
obj_table[i] = r->ring[idx]; \
@@ -437,7 +437,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -485,7 +485,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -544,7 +544,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

prod_head = r->prod.head;
@@ -580,7 +580,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -630,7 +630,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -727,7 +727,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

cons_head = r->cons.head;
prod_tail = r->prod.tail;
@@ -1056,7 +1056,7 @@ rte_ring_full(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}

/**
@@ -1089,7 +1089,7 @@ rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}

/**
@@ -1105,7 +1105,7 @@ rte_ring_free_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
}

/**
@@ -1119,7 +1119,7 @@ rte_ring_free_count(const struct rte_ring *r)
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
- return r->prod.size;
+ return r->size;
}

/**
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index ebcb896..5f09097 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -148,7 +148,7 @@ check_live_watermark_change(__attribute__((unused)) void *dummy)
}

/* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->prod.watermark;
+ watermark = r->watermark;
if (watermark != watermark_old &&
(watermark_old != 16 || watermark != 32)) {
printf("Bad watermark change %u -> %u\n", watermark_old,
@@ -213,7 +213,7 @@ test_set_watermark( void ){
printf( " ring lookup failed\n" );
goto error;
}
- count = r->prod.size*2;
+ count = r->size * 2;
setwm = rte_ring_set_water_mark(r, count);
if (setwm != -EINVAL){
printf("Test failed to detect invalid watermark count value\n");
@@ -222,7 +222,7 @@ test_set_watermark( void ){

count = 0;
rte_ring_set_water_mark(r, count);
- if (r->prod.watermark != r->prod.size) {
+ if (r->watermark != r->size) {
printf("Test failed to detect invalid watermark count value\n");
goto error;
}
--
2.9.3
Thomas Monjalon
2017-03-27 09:52:58 UTC
Permalink
Post by Bruce Richardson
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of that into the top level
structure so they are not duplicated.
Sorry Bruce, I'm encountering this error:

fatal error: no member named 'size' in 'struct rte_ring_headtail'
if (r->prod.size >= ring_size) {
~~~~~~~ ^
Bruce Richardson
2017-03-27 10:13:39 UTC
Permalink
Post by Thomas Monjalon
Post by Bruce Richardson
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of that into the top level
structure so they are not duplicated.
fatal error: no member named 'size' in 'struct rte_ring_headtail'
if (r->prod.size >= ring_size) {
~~~~~~~ ^
Hi Thomas,

again, I need more information here. I've just revalidated these first
three patches, doing 7 builds of each (gcc, clang, debug, shared
library, old ABI, default machine and 32-bit), as well as compiling the
apps with gcc and clang, and I see no errors.

/Bruce
Bruce Richardson
2017-03-27 10:15:49 UTC
Permalink
Post by Thomas Monjalon
Post by Bruce Richardson
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of that into the top level
structure so they are not duplicated.
fatal error: no member named 'size' in 'struct rte_ring_headtail'
if (r->prod.size >= ring_size) {
~~~~~~~ ^
Ok, I think I've found it now using git grep. I assume this is in the
crypto code which is disabled by default, right?

/Bruce
Thomas Monjalon
2017-03-27 13:13:04 UTC
Permalink
Post by Bruce Richardson
Post by Thomas Monjalon
Post by Bruce Richardson
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of that into the top level
structure so they are not duplicated.
fatal error: no member named 'size' in 'struct rte_ring_headtail'
if (r->prod.size >= ring_size) {
~~~~~~~ ^
Ok, I think I've found it now using git grep. I assume this is in the
crypto code which is disabled by default, right?
Right, sorry for forgetting the context.
Bruce Richardson
2017-03-27 14:57:40 UTC
Permalink
Post by Thomas Monjalon
Post by Bruce Richardson
Post by Thomas Monjalon
Post by Bruce Richardson
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out of that into the top level
structure so they are not duplicated.
fatal error: no member named 'size' in 'struct rte_ring_headtail'
if (r->prod.size >= ring_size) {
~~~~~~~ ^
Ok, I think I've found it now using git grep. I assume this is in the
crypto code which is disabled by default, right?
Right, sorry for forgetting the context.
I've submitted a new patch for the crypto drivers to fix this issue. The
previous patch I did to fix this as a prerequisite didn't catch the
drivers that are disabled by default.

http://dpdk.org/dev/patchwork/patch/22445/

I've verified this now compiles with the aesni-mb crypto PMD enabled at
least. I'll see about enabling a few more crypto drivers on my system
just in case there are other things I missed in them for this set.

/Bruce
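
For illustration, the kind of one-line change the disabled-by-default crypto drivers needed: with size and mask now at the top level of struct rte_ring, code outside the library should stop reading r->prod.size and use either r->size or, better, the existing accessor. A hypothetical driver-side check (names invented):

#include <rte_ring.h>

/* Hypothetical capacity check in a driver. Before this patch it would have
 * read r->prod.size; afterwards the accessor avoids depending on the
 * struct layout at all. */
static int
ring_large_enough(const struct rte_ring *r, unsigned int ring_size)
{
	return rte_ring_get_size(r) >= ring_size;	/* was: r->prod.size */
}
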
Bruce Richardson
2017-03-24 17:09:58 UTC
Permalink
The debug option only provided statistics to the user, most of
which can be tracked by the application itself. Remove it both as a
compile-time option and as a feature, simplifying the code.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
config/common_base | 1 -
doc/guides/prog_guide/ring_lib.rst | 7 -
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.c | 41 ----
lib/librte_ring/rte_ring.h | 97 +-------
test/test/test_ring.c | 410 ---------------------------------
6 files changed, 13 insertions(+), 544 deletions(-)

diff --git a/config/common_base b/config/common_base
index c394651..69e91ae 100644
--- a/config/common_base
+++ b/config/common_base
@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f69753..d4ab502 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,

This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.

-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 57ae8bf..742ad6c 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -126,6 +126,7 @@ API Changes
have been made to it:

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+ * removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 93485d4..934ce87 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif

/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats sum;
- unsigned lcore_id;
-#endif
-
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " watermark=0\n");
else
fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
-
- /* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
- sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
- sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
- sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
- sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
- sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
- sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
- sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
- sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
- sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
- }
- fprintf(f, " size=%"PRIu32"\n", r->size);
- fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
- fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
- fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
- fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
- fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
- fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
- fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
- fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
- fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
- fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
- fprintf(f, " no statistics available\n");
-#endif
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index d650215..2777b41 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};

-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -184,10 +166,6 @@ struct rte_ring {
/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
* about compiler re-ordering */
@@ -199,27 +177,6 @@ struct rte_ring {
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
-/**
* Calculate the memory size needed for a ring
*
* This function returns the number of bytes needed for a ring, given
@@ -460,17 +417,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -485,15 +437,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

/*
* If there are other enqueues in progress that preceded us,
@@ -557,17 +505,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -580,15 +523,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

r->prod.tail = prod_next;
return ret;
@@ -652,16 +591,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

/* Set the actual entries for dequeue */
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -691,7 +625,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
sched_yield();
}
}
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -738,16 +671,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = prod_tail - cons_head;

if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -759,7 +687,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 5f09097..3891f5d 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
return -1;
}

-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
- printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
- return 0;
-#else
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
- unsigned num_items = 0;
- unsigned failed_enqueue_ops = 0;
- unsigned failed_enqueue_items = 0;
- unsigned failed_dequeue_ops = 0;
- unsigned failed_dequeue_items = 0;
- unsigned last_enqueue_ops = 0;
- unsigned last_enqueue_items = 0;
- unsigned last_quota_ops = 0;
- unsigned last_quota_items = 0;
- unsigned lcore_id = rte_lcore_id();
- struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
- printf("Test the ring stats.\n");
-
- /* Reset the watermark in case it was set in another test. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Allocate some dummy object pointers. */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
-
- /* Allocate some memory for copied objects. */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
- /* Set the head and tail pointers. */
- cur_src = src;
- cur_dst = dst;
-
- /* Do Enqueue tests. */
- printf("Test the dequeue stats.\n");
-
- /* Fill the ring up to RING_SIZE -1. */
- printf("Fill the ring.\n");
- for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
- rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- }
-
- /* Adjust for final enqueue = MAX_BULK -1. */
- cur_src--;
-
- printf("Verify that the ring is full.\n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
-
- printf("Verify the enqueue success stats.\n");
- /* Stats should match above enqueue operations to fill the ring. */
- if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Current max objects is RING_SIZE -1. */
- if (ring_stats->enq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any failures yet. */
- if (ring_stats->enq_fail_bulk != 0)
- goto fail;
- if (ring_stats->enq_fail_objs != 0)
- goto fail;
-
-
- printf("Test stats for SP burst enqueue to a full ring.\n");
- num_items = 2;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for SP bulk enqueue to a full ring.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP burst enqueue to a full ring.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP bulk enqueue to a full ring.\n");
- num_items = 16;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- /* Do Dequeue tests. */
- printf("Test the dequeue stats.\n");
-
- printf("Empty the ring.\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* There was only RING_SIZE -1 objects to dequeue. */
- cur_dst++;
-
- printf("Verify ring is empty.\n");
- if (1 != rte_ring_empty(r))
- goto fail;
-
- printf("Verify the dequeue success stats.\n");
- /* Stats should match above dequeue operations. */
- if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Objects dequeued is RING_SIZE -1. */
- if (ring_stats->deq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any dequeue failure stats yet. */
- if (ring_stats->deq_fail_bulk != 0)
- goto fail;
-
- printf("Test stats for SC burst dequeue with an empty ring.\n");
- num_items = 2;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for SC bulk dequeue with an empty ring.\n");
- num_items = 4;
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC burst dequeue with an empty ring.\n");
- num_items = 8;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC bulk dequeue with an empty ring.\n");
- num_items = 16;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test total enqueue/dequeue stats.\n");
- /* At this point the enqueue and dequeue stats should be the same. */
- if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
- goto fail;
- if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
- goto fail;
- if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
- goto fail;
- if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
- goto fail;
-
-
- /* Watermark Tests. */
- printf("Test the watermark/quota stats.\n");
-
- printf("Verify the initial watermark stats.\n");
- /* Watermark stats should be 0 since there is no watermark. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Set a watermark. */
- rte_ring_set_water_mark(r, 16);
-
- /* Reset pointers. */
- cur_src = src;
- cur_dst = dst;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue below watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should still be 0. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Success stats should have increased. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
- goto fail;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue at watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != 1)
- goto fail;
- if (ring_stats->enq_quota_objs != num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP burst enqueue above watermark.\n");
- num_items = 1;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP burst enqueue above watermark.\n");
- num_items = 2;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP bulk enqueue above watermark.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP bulk enqueue above watermark.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- printf("Test watermark success stats.\n");
- /* Success stats should be same as last non-watermarked enqueue. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items)
- goto fail;
-
-
- /* Cleanup. */
-
- /* Empty the ring. */
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* Reset the watermark. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
-fail:
- free(src);
- free(dst);
- return -1;
-#endif
-}
-
/*
* it will always fail to create ring with a wrong ring size number in this function
*/
@@ -1335,10 +929,6 @@ test_ring(void)
if (test_ring_basic() < 0)
return -1;

- /* ring stats */
- if (test_ring_stats() < 0)
- return -1;
-
/* basic operations */
if (test_live_watermark_change() < 0)
return -1;
--
2.9.3
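
Since the commit message notes that most of the removed statistics could be tracked by the application itself, here is a minimal sketch (not part of the series) of per-lcore counters kept around the existing bulk enqueue call, assuming the 0 / -ENOBUFS return convention it has at this point:

#include <stdint.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_ring.h>

/* Application-maintained per-lcore counters, replacing the removed
 * per-ring debug stats. Assumes calls are made from EAL lcores. */
struct app_ring_stats {
	uint64_t enq_success_bulk;
	uint64_t enq_success_objs;
	uint64_t enq_fail_bulk;
	uint64_t enq_fail_objs;
} __rte_cache_aligned;

static struct app_ring_stats app_stats[RTE_MAX_LCORE];

static inline int
app_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned int n)
{
	struct app_ring_stats *s = &app_stats[rte_lcore_id()];
	int ret = rte_ring_enqueue_bulk(r, obj_table, n);

	if (ret == 0) {
		s->enq_success_bulk++;
		s->enq_success_objs += n;
	} else {
		s->enq_fail_bulk++;
		s->enq_fail_objs += n;
	}
	return ret;
}
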
Bruce Richardson
2017-03-24 17:09:59 UTC
Permalink
There was a compile-time setting to make a ring yield when it
entered a loop in an MP or MC ring while waiting for the tail pointer update.
Build-time settings are not recommended for enabling or disabling features,
and since this one was off by default, remove it completely. If needed, a
runtime-enabled equivalent can be used instead.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
config/common_base | 1 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 ----
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.h | 35 +++++--------------------
4 files changed, 7 insertions(+), 35 deletions(-)

diff --git a/config/common_base b/config/common_base
index 69e91ae..2d54ddf 100644
--- a/config/common_base
+++ b/config/common_base
@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
# Compile librte_mempool
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 10a10a8..7c39cd2 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -352,11 +352,6 @@ Known Issues

3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.

- ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
- It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
- This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
+ rte_timer

Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 742ad6c..556869f 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -127,6 +127,7 @@ API Changes

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
+ * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 2777b41..f8ac7f5 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -114,11 +114,6 @@ enum rte_ring_queue_behavior {
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)

-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
-#endif
-
struct rte_memzone; /* forward declaration, so as not to require memzone.h */

#if RTE_CACHE_LINE_SIZE < 128
@@ -393,7 +388,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -447,18 +442,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head)) {
+ while (unlikely(r->prod.tail != prod_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->prod.tail = prod_next;
return ret;
}
@@ -491,7 +477,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -568,7 +554,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -613,18 +599,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head)) {
+ while (unlikely(r->cons.tail != cons_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -659,7 +636,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
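
The "runtime enabled equivalent" the commit message mentions is not defined by this series; purely as a sketch, the removed loop could be made runtime-configurable along these lines (the variable and function names here are invented, not DPDK API):

#include <sched.h>
#include <stdint.h>

/* Hypothetical runtime knob replacing the old build-time
 * CONFIG_RTE_RING_PAUSE_REP_COUNT; 0 keeps the new default of never
 * yielding while waiting for the tail to catch up. */
static unsigned int ring_pause_rep_count;

/* Shape of the tail-wait loop with the yield decision moved to runtime.
 * Inside the library 'tail' would be &r->prod.tail or &r->cons.tail, and
 * rte_pause() would be called on each iteration; it is elided here to
 * keep the sketch self-contained. */
static inline void
wait_for_tail(const volatile uint32_t *tail, uint32_t expected)
{
	unsigned int rep = 0;

	while (*tail != expected) {
		if (ring_pause_rep_count != 0 &&
		    ++rep == ring_pause_rep_count) {
			rep = 0;
			sched_yield();
		}
	}
}
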
Bruce Richardson
2017-03-24 17:10:00 UTC
Permalink
Remove the watermark support. A future commit will add support for having
the enqueue functions return the amount of free space in the ring, which
will allow applications to implement their own watermark checks and is
also more generally useful to the application.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V2: fix missed references to watermarks in v1
---
doc/guides/prog_guide/ring_lib.rst | 8 --
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
test/test/autotest_test_funcs.py | 7 --
test/test/commands.c | 52 ------------
test/test/test_ring.c | 149 +--------------------------------
8 files changed, 8 insertions(+), 293 deletions(-)

diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index d4ab502..b31ab7a 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -102,14 +102,6 @@ Name
A ring is identified by a unique name.
It is not possible to create two rings with the same name (rte_ring_create() returns NULL if this is attempted).

-Water Marking
-~~~~~~~~~~~~~
-
-The ring can have a high water mark (threshold).
-Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
-
-This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 556869f..af907b8 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -128,6 +128,8 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
+ * removed the function ``rte_ring_set_water_mark`` as part of a general
+ removal of watermarks support in the library.

ABI Changes
-----------
diff --git a/examples/Makefile b/examples/Makefile
index da2bfdd..19cd5ad 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-DIRS-y += quota_watermark
+#DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 934ce87..25f64f0 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,7 +138,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->watermark = count;
r->prod.single = !!(flags & RING_F_SP_ENQ);
r->cons.single = !!(flags & RING_F_SC_DEQ);
r->size = count;
@@ -256,24 +255,6 @@ rte_ring_free(struct rte_ring *r)
rte_free(te);
}

-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
-{
- if (count >= r->size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->size;
-
- r->watermark = count;
- return 0;
-}
-
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
@@ -287,10 +268,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->watermark == r->size)
- fprintf(f, " watermark=0\n");
- else
- fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index f8ac7f5..906e8ae 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -153,7 +153,6 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -168,7 +167,6 @@ struct rte_ring {

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
@@ -274,26 +272,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);

/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -374,8 +352,6 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -390,7 +366,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
int success;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -431,13 +406,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
/*
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
@@ -446,7 +414,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -465,8 +433,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -479,7 +445,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_next, free_entries;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

prod_head = r->prod.head;
cons_tail = r->cons.tail;
@@ -508,15 +473,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -682,8 +640,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -704,8 +660,6 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -730,8 +684,6 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -756,8 +708,6 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -775,8 +725,6 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -798,8 +746,6 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
diff --git a/test/test/autotest_test_funcs.py b/test/test/autotest_test_funcs.py
index 1c5f390..8da8fcd 100644
--- a/test/test/autotest_test_funcs.py
+++ b/test/test/autotest_test_funcs.py
@@ -292,11 +292,4 @@ def ring_autotest(child, test_name):
elif index == 2:
return -1, "Fail [Timeout]"

- child.sendline("set_watermark test 100")
- child.sendline("dump_ring test")
- index = child.expect([" watermark=100",
- pexpect.TIMEOUT], timeout=1)
- if index != 0:
- return -1, "Fail [Bad watermark]"
-
return 0, "Success"
diff --git a/test/test/commands.c b/test/test/commands.c
index 2df46b0..551c81d 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -228,57 +228,6 @@ cmdline_parse_inst_t cmd_dump_one = {

/****************/

-struct cmd_set_ring_result {
- cmdline_fixed_string_t set;
- cmdline_fixed_string_t name;
- uint32_t value;
-};
-
-static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_ring_result *res = parsed_result;
- struct rte_ring *r;
- int ret;
-
- r = rte_ring_lookup(res->name);
- if (r == NULL) {
- cmdline_printf(cl, "Cannot find ring\n");
- return;
- }
-
- if (!strcmp(res->set, "set_watermark")) {
- ret = rte_ring_set_water_mark(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set water mark\n");
- }
-}
-
-cmdline_parse_token_string_t cmd_set_ring_set =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_watermark");
-
-cmdline_parse_token_string_t cmd_set_ring_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
-
-cmdline_parse_token_num_t cmd_set_ring_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);
-
-cmdline_parse_inst_t cmd_set_ring = {
- .f = cmd_set_ring_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "set watermark: "
- "set_watermark <ring_name> <value>",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_set_ring_set,
- (void *)&cmd_set_ring_name,
- (void *)&cmd_set_ring_value,
- NULL,
- },
-};
-
-/****************/
-
struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
@@ -419,7 +368,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_autotest,
(cmdline_parse_inst_t *)&cmd_dump,
(cmdline_parse_inst_t *)&cmd_dump_one,
- (cmdline_parse_inst_t *)&cmd_set_ring,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_set_rxtx,
(cmdline_parse_inst_t *)&cmd_set_rxtx_anchor,
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 3891f5d..666a451 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -78,21 +78,6 @@
* - Dequeue one object, two objects, MAX_BULK objects
* - Check that dequeued pointers are correct
*
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check live watermark change
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of watermark.
- * - At the same time, change the watermark on the master lcore.
- * - The slave lcore will check that watermark changes from 16 to 32.
- *
* #. Performance tests.
*
* Tests done in test_ring_perf.c
@@ -115,123 +100,6 @@ static struct rte_ring *r;

#define TEST_RING_FULL_EMTPY_ITER 8

-static int
-check_live_watermark_change(__attribute__((unused)) void *dummy)
-{
- uint64_t hz = rte_get_timer_hz();
- void *obj_table[MAX_BULK];
- unsigned watermark, watermark_old = 16;
- uint64_t cur_time, end_time;
- int64_t diff = 0;
- int i, ret;
- unsigned count = 4;
-
- /* init the object table */
- memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz / 4);
-
- /* check that bulk and watermark are 4 and 32 (respectively) */
- while (diff >= 0) {
-
- /* add in ring until we reach watermark */
- ret = 0;
- for (i = 0; i < 16; i ++) {
- if (ret != 0)
- break;
- ret = rte_ring_enqueue_bulk(r, obj_table, count);
- }
-
- if (ret != -EDQUOT) {
- printf("Cannot enqueue objects, or watermark not "
- "reached (ret=%d)\n", ret);
- return -1;
- }
-
- /* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->watermark;
- if (watermark != watermark_old &&
- (watermark_old != 16 || watermark != 32)) {
- printf("Bad watermark change %u -> %u\n", watermark_old,
- watermark);
- return -1;
- }
- watermark_old = watermark;
-
- /* dequeue objects from ring */
- while (i--) {
- ret = rte_ring_dequeue_bulk(r, obj_table, count);
- if (ret != 0) {
- printf("Cannot dequeue (ret=%d)\n", ret);
- return -1;
- }
- }
-
- cur_time = rte_get_timer_cycles();
- diff = end_time - cur_time;
- }
-
- if (watermark_old != 32 ) {
- printf(" watermark was not updated (wm=%u)\n",
- watermark_old);
- return -1;
- }
-
- return 0;
-}
-
-static int
-test_live_watermark_change(void)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
-
- printf("Test watermark live modification\n");
- rte_ring_set_water_mark(r, 16);
-
- /* launch a thread that will enqueue and dequeue, checking
- * watermark and quota */
- rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
-
- rte_delay_ms(100);
- rte_ring_set_water_mark(r, 32);
- rte_delay_ms(100);
-
- if (rte_eal_wait_lcore(lcore_id2) < 0)
- return -1;
-
- return 0;
-}
-
-/* Test for catch on invalid watermark values */
-static int
-test_set_watermark( void ){
- unsigned count;
- int setwm;
-
- struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
- if(r == NULL){
- printf( " ring lookup failed\n" );
- goto error;
- }
- count = r->size * 2;
- setwm = rte_ring_set_water_mark(r, count);
- if (setwm != -EINVAL){
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
-
- count = 0;
- rte_ring_set_water_mark(r, count);
- if (r->watermark != r->size) {
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
- return 0;
-
-error:
- return -1;
-}
-
/*
* helper routine for test_ring_basic
*/
@@ -418,8 +286,7 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- printf("test watermark and default bulk enqueue / dequeue\n");
- rte_ring_set_water_mark(r, 20);
+ printf("test default bulk enqueue / dequeue\n");
num_elems = 16;

cur_src = src;
@@ -433,8 +300,8 @@ test_ring_basic(void)
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != -EDQUOT) {
- printf("Watermark not exceeded\n");
+ if (ret != 0) {
+ printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
@@ -930,16 +797,6 @@ test_ring(void)
return -1;

/* basic operations */
- if (test_live_watermark_change() < 0)
- return -1;
-
- if ( test_set_watermark() < 0){
- printf ("Test failed to detect invalid parameter\n");
- return -1;
- }
- else
- printf ( "Test detected forced bad watermark values\n");
-
if ( test_create_count_odd() < 0){
printf ("Test failed to detect odd count\n");
return -1;
--
2.9.3
Bruce Richardson
2017-03-24 17:10:05 UTC
Permalink
The local variable i is only used for loop control, so define it in
the enqueue and dequeue blocks directly, rather than at the function
level.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
lib/librte_ring/rte_ring.h | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index b05fecb..e801510 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -285,6 +285,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
+ unsigned int i; \
const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
@@ -311,6 +312,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
+ unsigned int i; \
uint32_t idx = cons_head & mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
@@ -361,7 +363,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned int max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move prod.head atomically */
@@ -431,7 +432,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned int i;
uint32_t mask = r->mask;

prod_head = r->prod.head;
@@ -495,7 +495,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move cons.head atomically */
@@ -566,7 +565,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-03-24 17:10:06 UTC
Permalink
We can write a single common function for head manipulation for enqueue
and a common one for dequeue, allowing us to have a single worker function
for each of enqueue and dequeue, rather than two of each. Update all other
inline functions to use the new functions.
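
For illustration, the resulting enqueue path can be sketched as below.
This is a simplified restatement of the change, not extra code in the
patch; the exact implementation, and the matching dequeue path, are in
the diff that follows.

static inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		int is_sp, unsigned int *free_space)
{
	uint32_t prod_head, prod_next, free_entries;

	/* common head update, covering both the SP and MP cases */
	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n != 0) {
		ENQUEUE_PTRS();		/* copy the objects into the ring */
		rte_smp_wmb();
		/* wait for any preceding enqueues, then publish ours */
		while (unlikely(r->prod.tail != prod_head))
			rte_pause();
		r->prod.tail = prod_next;
	}
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}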

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V3: renamed parameter "is_mp" in __rte_ring_do_dequeue to the correct "is_sc"
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 25f64f0..5f98c33 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,8 +138,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.single = !!(flags & RING_F_SP_ENQ);
- r->cons.single = !!(flags & RING_F_SC_DEQ);
+ r->prod.single = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
+ r->cons.single = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
r->size = count;
r->mask = count - 1;
r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index e801510..3d8f738 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -169,6 +169,12 @@ struct rte_ring {
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

+/* @internal defines for passing to the enqueue dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
+
/**
* Calculate the memory size needed for a ring
*
@@ -287,7 +293,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
#define ENQUEUE_PTRS() do { \
unsigned int i; \
const uint32_t size = r->size; \
- uint32_t idx = prod_head & mask; \
+ uint32_t idx = prod_head & r->mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
r->ring[idx] = obj_table[i]; \
@@ -313,7 +319,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
unsigned int i; \
- uint32_t idx = cons_head & mask; \
+ uint32_t idx = cons_head & r->mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
@@ -336,83 +342,72 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} while (0)

/**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
* @param n
- * The number of objects to add in the ring from the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned int max = n;
+ const uint32_t mask = r->mask;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

- /* move prod.head atomically */
do {
/* Reset n to the initial burst count */
n = max;

- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
+ *old_head = r->prod.head;
+ const uint32_t cons_tail = r->cons.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
+ * *old_head > cons_tail). So 'free_entries' is always between 0
* and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
+ *free_entries = (mask + cons_tail - *old_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries))
+ if (unlikely(n > *free_entries))
n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : free_entries;
+ 0 : *free_entries;

if (n == 0)
- goto end;
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
} while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-end:
- if (free_space != NULL)
- *free_space = free_entries - n;
return n;
}

/**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
*
- * @param r
+ * @param r
* A pointer to the ring structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
@@ -420,44 +415,40 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;

+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;

-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
r->prod.tail = prod_next;
+
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -465,130 +456,112 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
}

/**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of elements we will want to dequeue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
n = max;

- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
+ *old_head = r->cons.head;
+ const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- entries = (prod_tail - cons_head);
+ *entries = (prod_tail - *old_head);

/* Set the actual entries for dequeue */
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

if (unlikely(n == 0))
- goto end;
-
- cons_next = cons_head + n;
- success = rte_atomic32_cmpset(&r->cons.head, cons_head,
- cons_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
} while (unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
- rte_smp_rmb();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
-end:
- if (available != NULL)
- *available = entries - n;
return n;
}

/**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
*
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of void * pointers (objects).
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of objects to pull from the ring.
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+ int is_sc, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- uint32_t mask = r->mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
-
- if (unlikely(entries == 0))
- goto end;
+ uint32_t cons_head, cons_next;
+ uint32_t entries;

- cons_next = cons_head + n;
- r->cons.head = cons_next;
+ n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;

- /* copy in table */
DEQUEUE_PTRS();
rte_smp_rmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->cons.tail != cons_head))
+ rte_pause();
+
r->cons.tail = cons_next;
+
end:
if (available != NULL)
*available = entries - n;
@@ -614,8 +587,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MP, free_space);
}

/**
@@ -634,8 +607,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SP, free_space);
}

/**
@@ -658,10 +631,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->prod.single, free_space);
}

/**
@@ -741,8 +712,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MC, available);
}

/**
@@ -762,8 +733,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SC, available);
}

/**
@@ -786,10 +757,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->cons.single, available);
}

/**
@@ -972,8 +941,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}

/**
@@ -992,8 +961,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}

/**
@@ -1016,10 +985,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+ r->prod.single, free_space);
}

/**
@@ -1043,8 +1010,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}

/**
@@ -1065,8 +1032,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}

/**
@@ -1089,10 +1056,9 @@ static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE,
+ r->cons.single, available);
}

#ifdef __cplusplus
--
2.9.3
Bruce Richardson
2017-03-24 17:10:07 UTC
Permalink
Both producer and consumer use the same logic for updating the tail
index, so merge it into a single function.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V3: added check for "single" mode in tail update to bypass the unneeded check
---
lib/librte_ring/rte_ring.h | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 3d8f738..b352dad 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -341,6 +341,21 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)

+static inline __attribute__((always_inline)) void
+update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
+ uint32_t single)
+{
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ if (!single)
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ ht->tail = new_val;
+}
+
/**
* @internal This function updates the producer head for enqueue
*
@@ -440,15 +455,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-
+ update_tail(&r->prod, prod_head, prod_next, is_sp);
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -553,14 +560,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
+ update_tail(&r->cons, cons_head, cons_next, is_sc);

end:
if (available != NULL)
--
2.9.3
Bruce Richardson
2017-03-24 17:10:08 UTC
Permalink
Modify the enqueue and dequeue macros to support copying any type of
object by passing in the exact object type. Rather than using the "ring"
structure member of rte_ring, which is of type "array of void *", instead
have the macros take the start of the ring as a pointer value, thereby
leaving the rte_ring structure as purely a header value. This allows it
to be reused by other future ring types, which can add extra fields if
they want, or even have the actual ring elements, of whatever type,
stored separately from the ring header.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
lib/librte_ring/rte_ring.h | 68 ++++++++++++++++++++++++----------------------
1 file changed, 36 insertions(+), 32 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index b352dad..f0692d3 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -159,11 +159,7 @@ struct rte_ring {

/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
-
- void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
- * not volatile so need to be careful
- * about compiler re-ordering */
-};
+} __rte_cache_aligned;

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
@@ -290,54 +286,62 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
-#define ENQUEUE_PTRS() do { \
+#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
unsigned int i; \
- const uint32_t size = r->size; \
- uint32_t idx = prod_head & r->mask; \
+ const uint32_t size = (r)->size; \
+ uint32_t idx = prod_head & (r)->mask; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx+1] = obj_table[i+1]; \
- r->ring[idx+2] = obj_table[i+2]; \
- r->ring[idx+3] = obj_table[i+3]; \
+ ring[idx] = obj_table[i]; \
+ ring[idx+1] = obj_table[i+1]; \
+ ring[idx+2] = obj_table[i+2]; \
+ ring[idx+3] = obj_table[i+3]; \
} \
switch (n & 0x3) { \
- case 3: r->ring[idx++] = obj_table[i++]; \
- case 2: r->ring[idx++] = obj_table[i++]; \
- case 1: r->ring[idx++] = obj_table[i++]; \
+ case 3: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ ring[idx++] = obj_table[i++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
} \
-} while(0)
+} while (0)

/* the actual copy of pointers on the ring to obj_table.
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
-#define DEQUEUE_PTRS() do { \
+#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
unsigned int i; \
- uint32_t idx = cons_head & r->mask; \
- const uint32_t size = r->size; \
+ uint32_t idx = cons_head & (r)->mask; \
+ const uint32_t size = (r)->size; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i+1] = r->ring[idx+1]; \
- obj_table[i+2] = r->ring[idx+2]; \
- obj_table[i+3] = r->ring[idx+3]; \
+ obj_table[i] = ring[idx]; \
+ obj_table[i+1] = ring[idx+1]; \
+ obj_table[i+2] = ring[idx+2]; \
+ obj_table[i+3] = ring[idx+3]; \
} \
switch (n & 0x3) { \
- case 3: obj_table[i++] = r->ring[idx++]; \
- case 2: obj_table[i++] = r->ring[idx++]; \
- case 1: obj_table[i++] = r->ring[idx++]; \
+ case 3: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = ring[idx++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
} \
} while (0)

@@ -452,7 +456,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
if (n == 0)
goto end;

- ENQUEUE_PTRS();
+ ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
rte_smp_wmb();

update_tail(&r->prod, prod_head, prod_next, is_sp);
@@ -557,7 +561,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
if (n == 0)
goto end;

- DEQUEUE_PTRS();
+ DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
rte_smp_rmb();

update_tail(&r->cons, cons_head, cons_next, is_sc);
--
2.9.3
Bruce Richardson
2017-03-24 17:10:01 UTC
Permalink
The bulk functions for rings return 0 when all elements are enqueued and
a negative value when there is no space. Change that to make them
consistent with the burst functions, returning the number of elements
enqueued or dequeued, i.e. 0 or N. This change also allows the return
value from enqueue/dequeue to be used directly without a branch for
error checking.
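
To illustrate the change for callers (the ring, buffer and counter names
here are placeholders, not code from the patch):

/* Before: bulk enqueue returned 0 on success, -ENOBUFS on failure. */
if (rte_ring_enqueue_bulk(r, (void **)pkts, n) != 0) {
	/* handle "ring full" */
}

/* After: it returns the number actually enqueued, which for the bulk
 * variants is either 0 or n, so the result can be used directly. */
unsigned int sent = rte_ring_enqueue_bulk(r, (void **)pkts, n);
if (sent == 0) {
	/* ring full: nothing was enqueued */
}
tx_count += sent;	/* no extra branch needed to count successes */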

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
doc/guides/rel_notes/release_17_05.rst | 11 +++
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
examples/load_balancer/runtime.c | 16 ++-
.../client_server_mp/mp_client/client.c | 8 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/qos_sched/app_thread.c | 8 +-
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 12 ++-
lib/librte_ring/rte_ring.h | 109 +++++++--------------
test/test-pipeline/pipeline_hash.c | 2 +-
test/test-pipeline/runtime.c | 8 +-
test/test/test_ring.c | 46 +++++----
test/test/test_ring_perf.c | 8 +-
14 files changed, 106 insertions(+), 130 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index af907b8..a465c69 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -130,6 +130,17 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * changed the return value of the enqueue and dequeue bulk functions to
+ match that of the burst equivalents. In all cases, ring functions which
+ operate on multiple packets now return the number of elements enqueued
+ or dequeued, as appropriate. The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``

ABI Changes
-----------
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index 9b69cfe..e3a63c8 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325..82b10bc 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (
(void **) lp->rx.mbuf_out[worker].array,
bsz);

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -349,9 +349,8 @@ app_lcore_io_tx(
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

n_mbufs += bsz_rd;

@@ -505,9 +504,8 @@ app_lcore_worker(
(void **) lp->mbuf_in.array,
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -559,7 +557,7 @@ app_lcore_worker(

#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +570,7 @@ app_lcore_worker(
}
#endif

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca3..dca9eb9 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,10 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");

for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;

- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d..19c95b2 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb..dab4594 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -180,7 +180,7 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

conf->counter = 0; /* reset empty read loop counter */
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt) == 0)
+ ; /* empty body */

conf_idx++;
if (confs[conf_idx] == NULL)
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index a6c0c70..9ec6a05 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) != 0))
+ rx_pkts) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 1a54d1b..3eb7fac 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..409b860 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -42,26 +42,30 @@ static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 906e8ae..34b438c 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -349,14 +349,10 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -388,7 +384,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -414,7 +410,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -430,14 +426,10 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -457,7 +449,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -474,7 +466,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -495,16 +487,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/

-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -536,7 +523,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* Set the actual entries for dequeue */
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -562,7 +549,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

r->cons.tail = cons_next;

- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -580,15 +567,10 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -607,7 +589,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,

if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -623,7 +605,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -639,10 +621,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -659,10 +640,9 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -683,10 +663,9 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -713,7 +692,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -730,7 +709,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -751,10 +730,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -770,11 +746,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -791,11 +765,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -815,11 +787,9 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
if (r->cons.single)
@@ -846,7 +816,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -864,7 +834,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -886,10 +856,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 10d2869..1ac0aa8 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -547,6 +547,6 @@ app_main_loop_rx_metadata(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 42a6142..4e20669 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -98,7 +98,7 @@ app_main_loop_rx(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -123,7 +123,7 @@ app_main_loop_worker(void) {
(void **) worker_mbuf->array,
app.burst_size_worker_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

do {
@@ -131,7 +131,7 @@ app_main_loop_worker(void) {
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
app.burst_size_worker_write);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -152,7 +152,7 @@ app_main_loop_tx(void) {
(void **) &app.mbuf_tx[i].array[n_mbufs],
app.burst_size_tx_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

n_mbufs += app.burst_size_tx_read;
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 666a451..112433b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,20 +117,18 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -171,37 +169,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -217,37 +215,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -264,11 +262,11 @@ test_ring_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
}

@@ -294,25 +292,25 @@ test_ring_basic(void)

ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
}
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 320c20c..8ccbdef 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();
--
2.9.3
Bruce Richardson
2017-03-24 17:10:02 UTC
Permalink
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space in the
ring. This information can be used by applications in a number of ways.
For instance, with single-producer queues it provides a maximum
enqueue size which is guaranteed to succeed. It can also be used to
implement watermark functionality in apps, replacing the older
functionality with a more flexible version that enables apps to
implement multiple watermark thresholds, rather than just one.
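
As a sketch of the watermark use case (the threshold, ring and buffer
names below are invented for the example, not part of the patch):

unsigned int free_space;
unsigned int sent;

/* enqueue a burst and read back how much space remains afterwards */
sent = rte_ring_enqueue_burst(tx_ring, (void **)bufs, nb_bufs,
		&free_space);
if (free_space < APP_WM_THRESHOLD) {
	/* ring is filling up: apply backpressure, e.g. pause the
	 * producer or bump a congestion counter */
}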

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
doc/guides/rel_notes/release_17_05.rst | 3 +
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 3 +-
examples/load_balancer/runtime.c | 12 ++-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 7 +-
examples/qos_sched/app_thread.c | 4 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 28 ++++---
lib/librte_ring/rte_ring.h | 89 +++++++++++-----------
test/test-pipeline/pipeline_hash.c | 3 +-
test/test-pipeline/runtime.c | 5 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 5 +-
test/test/test_ring.c | 55 ++++++-------
test/test/test_ring_perf.c | 16 ++--
test/test/test_table_ports.c | 4 +-
test/test/virtual_pmd.c | 4 +-
22 files changed, 139 insertions(+), 118 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index a465c69..dc1749b 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -130,6 +130,9 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * added an extra parameter to the burst/bulk enqueue functions to
+ return the number of free spaces in the ring after enqueue. This can
+ be used by an application to implement its own watermark functionality.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 6f9cc1a..adbf478 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -102,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 7b8a759..bb84f13 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -238,7 +238,8 @@ lcore_rx(struct lcore_params *p)
continue;
}

- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs,
+ nb_ret, NULL);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 82b10bc..1645994 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,7 +144,8 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -310,7 +311,8 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -553,7 +555,8 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);

#if APP_STATS
lp->rings_out_iters[port] ++;
@@ -605,7 +608,8 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 19c95b2..c2b0261 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) == 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index a448039..569b6da 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;

/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;

/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index dab4594..0c81a15 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) == 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -231,7 +231,7 @@ app_worker_thread(struct thread_conf **confs)
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
- (void **)mbufs, nb_pkt) == 0)
+ (void **)mbufs, nb_pkt, NULL) == 0)
; /* empty body */

conf_idx++;
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 3eb7fac..597b4c2 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 51db006..6552199 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -808,7 +808,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
/* Need to enqueue the free slots in global ring. */
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
- LCORE_CACHE_SIZE);
+ LCORE_CACHE_SIZE, NULL);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 409b860..9b8fd2b 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -43,7 +43,7 @@ common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_mp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
@@ -51,7 +51,7 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_sp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index cc0b5b1..b599d65 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -197,7 +197,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
dup_bufs[d_pkts++] = p;
}

- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
if (unlikely(ring_enq < d_pkts)) {
RTE_LOG(DEBUG, PDUMP,
"only %d of packets enqueued to ring\n", ring_enq);
diff --git a/lib/librte_port/rte_port_ras.c b/lib/librte_port/rte_port_ras.c
index c4bb508..4de0945 100644
--- a/lib/librte_port/rte_port_ras.c
+++ b/lib/librte_port/rte_port_ras.c
@@ -167,7 +167,7 @@ send_burst(struct rte_port_ring_writer_ras *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 0df1bcf..c5dbe07 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -241,7 +241,7 @@ send_burst(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -256,7 +256,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -318,11 +318,11 @@ rte_port_ring_writer_tx_bulk_internal(void *port,

RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
- n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
@@ -517,7 +517,7 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -527,7 +527,8 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_sp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -550,7 +551,7 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -560,7 +561,8 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_mp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -633,10 +635,12 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
n_pkts_ok =
- rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
n_pkts_ok =
- rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

if (n_pkts_ok >= n_pkts)
return 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 34b438c..61a4dc8 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -354,20 +354,16 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const unsigned int max = n;
int success;
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move prod.head atomically */
do {
/* Reset n to the initial burst count */
@@ -382,16 +378,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = (mask + cons_tail - prod_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : free_entries;
+
+ if (n == 0)
+ goto end;

prod_next = prod_head + n;
success = rte_atomic32_cmpset(&r->prod.head, prod_head,
@@ -410,6 +402,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -431,7 +426,8 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
@@ -447,16 +443,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = mask + cons_tail - prod_head;

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+
+ if (n == 0)
+ goto end;
+

prod_next = prod_head + n;
r->prod.head = prod_next;
@@ -466,6 +458,9 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -625,9 +620,10 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -644,9 +640,10 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -667,12 +664,12 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.single)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
}

/**
@@ -692,7 +689,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -709,7 +706,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -730,7 +727,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -976,9 +973,10 @@ struct rte_ring *rte_ring_lookup(const char *name);
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -995,9 +993,10 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -1018,12 +1017,12 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.single)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 1ac0aa8..0c6e04f 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -546,7 +546,8 @@ app_main_loop_rx_metadata(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 4e20669..c06ff54 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -97,7 +97,7 @@ app_main_loop_rx(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs, NULL);
} while (ret == 0);
}
}
@@ -130,7 +130,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
- app.burst_size_worker_write);
+ app.burst_size_worker_write,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 53caa3e..8df28b4 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -206,7 +206,8 @@ slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
static int
slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, size);
+ return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf,
+ size, NULL);
}

static uint16_t
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index af011f7..045a7f2 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -98,7 +98,7 @@ test_single_enqueue_dequeue(void)
const uint64_t sc_start = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
- rte_ring_enqueue_bulk(r, &burst, 1);
+ rte_ring_enqueue_bulk(r, &burst, 1, NULL);
rte_ring_dequeue_bulk(r, &burst, 1);
}
const uint64_t sc_end = rte_rdtsc_precise();
@@ -131,7 +131,8 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 112433b..b0ca88b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,11 +117,12 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
+ NULL) != 0);
TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
@@ -167,19 +168,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -213,19 +214,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -260,7 +261,7 @@ test_ring_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -290,13 +291,13 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
@@ -371,19 +372,19 @@ test_ring_burst_basic(void)

printf("Test SP & SC basic functions \n");
printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -419,7 +420,7 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
goto fail;
@@ -427,14 +428,14 @@ test_ring_burst_basic(void)
}

printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
/* Always one free entry left */
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -444,7 +445,7 @@ test_ring_burst_basic(void)
goto fail;

printf("Test enqueue for a full entry \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;

@@ -486,19 +487,19 @@ test_ring_burst_basic(void)
printf("Test MP & MC basic functions \n");

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -534,7 +535,7 @@ test_ring_burst_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -557,19 +558,19 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK objects */
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -607,7 +608,7 @@ test_ring_burst_basic(void)

printf("Covering rte_ring_enqueue_burst functions \n");

- ret = rte_ring_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
@@ -746,7 +747,7 @@ test_ring_basic_ex(void)
}

/* Covering the ring burst operation */
- ret = rte_ring_enqueue_burst(rp, obj, 2);
+ ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
if ((ret & RTE_RING_SZ_MASK) != 2) {
printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 8ccbdef..f95a8e9 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -323,14 +323,16 @@ test_burst_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
@@ -357,14 +359,16 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 2532367..395f4f3 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -80,7 +80,7 @@ test_port_ring_reader(void)
mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- mbuf, 1);
+ mbuf, 1, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

if (received_pkts < expected_pkts)
@@ -93,7 +93,7 @@ test_port_ring_reader(void)
mbuf[i] = rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
RTE_PORT_IN_BURST_SIZE_MAX);

diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 6e4dcd8..39e070c 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -380,7 +380,7 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
nb_pkts = 0;
else
nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increment opacket count */
dev_private->eth_stats.opackets += nb_pkts;
@@ -496,7 +496,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
vrtl_eth_dev->data->dev_private;

return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

int
--
2.9.3
Thomas Monjalon
2017-03-28 07:12:39 UTC
Permalink
Post by Bruce Richardson
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space in the
ring. This information can be used by applications in a number of ways,
for instance, with single-producer queues, it provides a max
enqueue size which is guaranteed to work. It can also be used to
implement watermark functionality in apps, replacing the older
functionality with a more flexible version, which enables apps to
implement multiple watermark thresholds, rather than just one.
There is an error with this patch and the crypto drivers:


drivers/crypto/kasumi/rte_kasumi_pmd.c:362:32: fatal error:
too few arguments to function call, expected 4, have 3
(void **)ops, processed_ops);
^
rte_ring.h:1018:1: note: 'rte_ring_enqueue_burst' declared here


drivers/crypto/snow3g/rte_snow3g_pmd.c:366:31: fatal error:
too few arguments to function call, expected 4, have 3
(void **)ops, processed_ops);
^
rte_ring.h:1018:1: note: 'rte_ring_enqueue_burst' declared here
Bruce Richardson
2017-03-28 08:16:34 UTC
Permalink
Post by Thomas Monjalon
Post by Bruce Richardson
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space in the
ring. This information can be used by applications in a number of ways,
for instance, with single-producer queues, it provides a max
enqueue size which is guaranteed to work. It can also be used to
implement watermark functionality in apps, replacing the older
functionality with a more flexible version, which enables apps to
implement multiple watermark thresholds, rather than just one.
too few arguments to function call, expected 4, have 3
(void **)ops, processed_ops);
^
rte_ring.h:1018:1: note: 'rte_ring_enqueue_burst' declared here
too few arguments to function call, expected 4, have 3
(void **)ops, processed_ops);
^
rte_ring.h:1018:1: note: 'rte_ring_enqueue_burst' declared here
Yes, I'm still working through this patchset with the crypto drivers enabled
myself. Patch 9 also has issues with some of the other drivers, though
I don't have this kasumi one enabled on my own system. I'll need to
do a v4, hopefully today. Sorry for not flagging this sooner; I didn't
know you were going to try applying the set again, so I thought I had
more time to report the results of testing with the crypto drivers.

/Bruce
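
For reference, the build errors above come from call sites still using the
old three-argument prototype; a minimal sketch of the required change
(the ring member name is illustrative, following the style of the other
crypto PMDs in this series) is simply to pass NULL for the new free-space
argument when the caller does not need it:

    unsigned int n;

    /* pass NULL for the new free-space argument when it is not needed */
    n = rte_ring_enqueue_burst(qp->processed_pkts,   /* ring member name illustrative */
            (void **)ops, processed_ops, NULL);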
Bruce Richardson
2017-03-24 17:10:03 UTC
Permalink
Add an extra parameter to the ring dequeue burst/bulk functions so that
those functions can optionally return the number of objects remaining in the
ring. This information can be used by applications in a number of ways;
for instance, with single-consumer queues it provides a maximum
dequeue size that is guaranteed to succeed.
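
A rough sketch of how a single-consumer application might use the returned
count (the ring, buffer and burst-size names below are illustrative, not
taken from this patch):

    void *objs[MAX_PKTS_BURST];   /* MAX_PKTS_BURST: illustrative app constant */
    unsigned int remaining;
    unsigned int nb_rx;

    /* dequeue a burst and learn how many objects are still in the ring */
    nb_rx = rte_ring_dequeue_burst(ring_in, objs, MAX_PKTS_BURST, &remaining);

    /* with a single consumer, 'remaining' is a dequeue size guaranteed to
     * succeed on the next call; e.g. only back off once the ring no longer
     * holds a full burst */
    if (nb_rx == 0 || remaining < MAX_PKTS_BURST)
            rte_pause();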

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
app/pdump/main.c | 2 +-
doc/guides/rel_notes/release_17_05.rst | 8 ++
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 2 +-
examples/load_balancer/runtime.c | 6 +-
.../client_server_mp/mp_client/client.c | 3 +-
examples/packet_ordering/main.c | 6 +-
examples/qos_sched/app_thread.c | 6 +-
examples/quota_watermark/qw/main.c | 5 +-
examples/server_node_efd/node/node.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 3 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ring.c | 6 +-
lib/librte_ring/rte_ring.h | 90 +++++++++++-----------
test/test-pipeline/runtime.c | 6 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 7 +-
test/test/test_ring.c | 54 ++++++-------
test/test/test_ring_perf.c | 20 +++--
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 8 +-
test/test/virtual_pmd.c | 4 +-
26 files changed, 145 insertions(+), 114 deletions(-)

diff --git a/app/pdump/main.c b/app/pdump/main.c
index b88090d..3b13753 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -496,7 +496,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)

/* first dequeue packets from ring of primary process */
const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
- (void *)rxtx_bufs, BURST_SIZE);
+ (void *)rxtx_bufs, BURST_SIZE, NULL);
stats->dequeue_pkts += nb_in_deq;

if (nb_in_deq) {
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index dc1749b..f0eeac2 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -133,6 +133,8 @@ API Changes
* added an extra parameter to the burst/bulk enqueue functions to
return the number of free spaces in the ring after enqueue. This can
be used by an application to implement its own watermark functionality.
+ * added an extra parameter to the burst/bulk dequeue functions to return
+ the number of elements remaining in the ring after dequeue.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
@@ -145,6 +147,12 @@ API Changes
- ``rte_ring_sc_dequeue_bulk``
- ``rte_ring_dequeue_bulk``

+ NOTE: the above functions all have different parameters as well as
+ different return values, due to the other listed changes above. This
+ means that all instances of the functions in existing code will be
+ flagged by the compiler. The return value usage should be checked
+ while fixing the compiler error due to the extra parameter.
+
ABI Changes
-----------

diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index ed5a9fc..f68ec8d 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -155,7 +155,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index f3ac9e2..96638af 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1008,7 +1008,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct port *port = &mode_8023ad_ports[slaves[i]];

slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];

for (j = 0; j < slave_slow_nb_pkts[i]; j++)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index adbf478..77ef3a1 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -88,7 +88,7 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index bb84f13..90c9613 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -330,7 +330,7 @@ lcore_tx(struct rte_ring *in_r)

struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE);
+ (void *)bufs, BURST_SIZE, NULL);
app_stats.tx.dequeue_pkts += nb_rx;

/* if we get no traffic, flush anything we have */
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 1645994..8192c08 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -349,7 +349,8 @@ app_lcore_io_tx(
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
@@ -504,7 +505,8 @@ app_lcore_worker(
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index dca9eb9..01b535c 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -279,7 +279,8 @@ main(int argc, char *argv[])
uint16_t i, rx_pkts;
uint8_t port;

- rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 569b6da..49ae35b 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)

/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;

@@ -510,7 +510,7 @@ send_thread(struct send_thread_args *args)

/* deque the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(nb_dq_mbufs == 0))
continue;
@@ -595,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)

/* deque the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(dqnum == 0))
continue;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 0c81a15..15f117f 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -179,7 +179,7 @@ app_tx_thread(struct thread_conf **confs)

while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
+ burst_conf.qos_dequeue, NULL);
if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

@@ -218,7 +218,7 @@ app_worker_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
@@ -254,7 +254,7 @@ app_mixed_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 57df8ef..2dcddea 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -247,7 +247,8 @@ pipeline_stage(__attribute__((unused)) void *args)
}

/* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
if (unlikely(nb_dq_pkts < 0))
continue;

@@ -305,7 +306,7 @@ send_stage(__attribute__((unused)) void *args)

/* Dequeue packets from tx and send them */
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
- (void *) tx_pkts, *quota);
+ (void *) tx_pkts, *quota, NULL);
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);

/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 9ec6a05..f780b92 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) == 0))
+ rx_pkts, NULL) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6552199..645c0cf 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -536,7 +536,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
- cached_free_slots->objs, LCORE_CACHE_SIZE);
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE, NULL);
if (n_slots == 0)
return -ENOSPC;

diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 9b8fd2b..5c132bf 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -58,14 +58,14 @@ static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_mc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_sc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_port/rte_port_frag.c b/lib/librte_port/rte_port_frag.c
index 0fcace9..320407e 100644
--- a/lib/librte_port/rte_port_frag.c
+++ b/lib/librte_port/rte_port_frag.c
@@ -186,7 +186,8 @@ rte_port_ring_reader_frag_rx(void *port,
/* If "pkts" buffer is empty, read packet burst from ring */
if (p->n_pkts == 0) {
p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX,
+ NULL);
RTE_PORT_RING_READER_FRAG_STATS_PKTS_IN_ADD(p, p->n_pkts);
if (p->n_pkts == 0)
return n_pkts_out;
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index c5dbe07..85fad44 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -111,7 +111,8 @@ rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
@@ -124,7 +125,8 @@ rte_port_ring_multi_reader_rx(void *port, struct rte_mbuf **pkts,
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 61a4dc8..b05fecb 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -488,7 +488,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -497,11 +498,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
@@ -516,15 +512,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = (prod_tail - cons_head);

/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;

cons_next = cons_head + n;
success = rte_atomic32_cmpset(&r->cons.head, cons_head,
@@ -543,7 +535,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_pause();

r->cons.tail = cons_next;
-
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -567,7 +561,8 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -582,15 +577,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* and size(ring)-1. */
entries = prod_tail - cons_head;

- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(entries == 0))
+ goto end;

cons_next = cons_head + n;
r->cons.head = cons_next;
@@ -600,6 +591,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -746,9 +740,11 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -765,9 +761,11 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -787,12 +785,13 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
if (r->cons.single)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
}

/**
@@ -813,7 +812,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -831,7 +830,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -853,7 +852,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -1043,9 +1042,11 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1063,9 +1064,11 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1085,12 +1088,13 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
if (r->cons.single)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
}

#ifdef __cplusplus
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index c06ff54..8970e1c 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -121,7 +121,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_rx[i],
(void **) worker_mbuf->array,
- app.burst_size_worker_read);
+ app.burst_size_worker_read,
+ NULL);

if (ret == 0)
continue;
@@ -151,7 +152,8 @@ app_main_loop_tx(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_tx[i],
(void **) &app.mbuf_tx[i].array[n_mbufs],
- app.burst_size_tx_read);
+ app.burst_size_tx_read,
+ NULL);

if (ret == 0)
continue;
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 8df28b4..15091b1 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -193,7 +193,8 @@ static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, };
static int
slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, size);
+ return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf,
+ size, NULL);
}

/*
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index 045a7f2..004882a 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -67,7 +67,7 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t eth_start = rte_rdtsc();
@@ -99,7 +99,7 @@ test_single_enqueue_dequeue(void)
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
rte_ring_enqueue_bulk(r, &burst, 1, NULL);
- rte_ring_dequeue_bulk(r, &burst, 1);
+ rte_ring_dequeue_bulk(r, &burst, 1, NULL);
}
const uint64_t sc_end = rte_rdtsc_precise();
rte_compiler_barrier();
@@ -133,7 +133,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, (void *)burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index b0ca88b..858ebc1 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -119,7 +119,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
__func__, i, rand);
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
NULL) != 0);
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand,
+ NULL) == rand);

/* fill the ring */
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
@@ -129,7 +130,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz,
+ NULL) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -186,19 +188,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -232,19 +234,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -265,7 +267,7 @@ test_ring_basic(void)
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -303,13 +305,13 @@ test_ring_basic(void)
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue2\n");
@@ -390,19 +392,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -451,19 +453,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK entries */
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -505,19 +507,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -539,7 +541,7 @@ test_ring_burst_basic(void)
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -578,19 +580,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available objects - the exact MAX_BULK */
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -613,7 +615,7 @@ test_ring_burst_basic(void)
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret != 2)
goto fail;
@@ -753,7 +755,7 @@ test_ring_basic_ex(void)
goto fail_test;
}

- ret = rte_ring_dequeue_burst(rp, obj, 2);
+ ret = rte_ring_dequeue_burst(rp, obj, 2, NULL);
if (ret != 2) {
printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index f95a8e9..ed89896 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -152,12 +152,12 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t mc_end = rte_rdtsc();

printf("SC empty dequeue: %.2F\n",
@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();

@@ -325,7 +325,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -333,7 +334,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

@@ -361,7 +363,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -369,7 +372,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

diff --git a/test/test/test_table_acl.c b/test/test/test_table_acl.c
index b3bfda4..4d43be7 100644
--- a/test/test/test_table_acl.c
+++ b/test/test/test_table_acl.c
@@ -713,7 +713,7 @@ test_pipeline_single_filter(int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0) {
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/test/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 36bfeda..b58aa5d 100644
--- a/test/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -494,7 +494,7 @@ test_pipeline_single_filter(int test_type, int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0)
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 395f4f3..39592ce 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -163,7 +163,7 @@ test_port_ring_writer(void)
rte_port_ring_writer_ops.f_flush(port);
expected_pkts = 1;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -7;
@@ -178,7 +178,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -193,7 +193,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -208,7 +208,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -9;
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 39e070c..b209355 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -342,7 +342,7 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
dev_private = vrtl_eth_dev->data->dev_private;

rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increments ipackets count */
dev_private->eth_stats.ipackets += rx_count;
@@ -508,7 +508,7 @@ virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,

dev_private = vrtl_eth_dev->data->dev_private;
return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

static uint8_t
--
2.9.3
Bruce Richardson
2017-03-24 17:10:04 UTC
Permalink
Now that the enqueue function returns the amount of space in the ring,
we can use that to replace the old watermark functionality. Update the
example app to do so, and re-enable it in the examples Makefile.
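
For clarity, a minimal sketch of the pattern the example app now uses, based on
the new enqueue API in this series (the RING_SIZE / HIGH_WATERMARK values and
the wrapper name are illustrative assumptions, not the app's actual names):

#include <rte_ring.h>

#define RING_SIZE      1024                    /* assumed ring size */
#define HIGH_WATERMARK (80 * RING_SIZE / 100)  /* assumed threshold */

/* Enqueue a burst and report whether ring occupancy crossed the
 * watermark, using the free-space count returned by the new API. */
static int
enqueue_with_watermark(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int free_space;

	/* Bulk enqueue now returns 0 (nothing stored) or n, and writes
	 * the remaining free space through the last argument. */
	if (rte_ring_enqueue_bulk(r, objs, n, &free_space) == 0)
		return -1;                      /* ring was too full */

	/* Occupancy is capacity minus free space. */
	return (RING_SIZE - free_space) > HIGH_WATERMARK;
}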

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
examples/Makefile | 2 +-
examples/quota_watermark/qw/init.c | 5 +++--
examples/quota_watermark/qw/main.c | 16 ++++++++++------
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 ++--
examples/quota_watermark/qwctl/qwctl.c | 2 ++
examples/quota_watermark/qwctl/qwctl.h | 1 +
7 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/examples/Makefile b/examples/Makefile
index 19cd5ad..da2bfdd 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-#DIRS-y += quota_watermark
+DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 95a9f94..6babfea 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -140,7 +140,7 @@ void init_ring(int lcore_id, uint8_t port_id)
if (ring == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

- rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);
+ *high_watermark = 80 * RING_SIZE / 100;

rings[lcore_id][port_id] = ring;
}
@@ -168,10 +168,11 @@ setup_shared_variables(void)
const struct rte_memzone *qw_memzone;

qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
- 2 * sizeof(int), rte_socket_id(), 0);
+ 3 * sizeof(int), rte_socket_id(), 0);
if (qw_memzone == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 2dcddea..bdb8a43 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -67,6 +67,7 @@ struct ether_fc_frame {

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;

uint8_t port_pairs[RTE_MAX_ETHPORTS];

@@ -158,6 +159,7 @@ receive_stage(__attribute__((unused)) void *args)
uint16_t nb_rx_pkts;

unsigned int lcore_id;
+ unsigned int free;

struct rte_mbuf *pkts[MAX_PKT_QUOTA];
struct rte_ring *ring;
@@ -189,13 +191,13 @@ receive_stage(__attribute__((unused)) void *args)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
(uint16_t) *quota);
ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
- nb_rx_pkts);
- if (ret == -EDQUOT) {
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
ring_state[port_id] = RING_OVERLOADED;
send_pause_frame(port_id, 1337);
}

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
@@ -217,6 +219,7 @@ pipeline_stage(__attribute__((unused)) void *args)
uint8_t port_id;

unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;

void *pkts[MAX_PKT_QUOTA];
struct rte_ring *rx, *tx;
@@ -253,11 +256,12 @@ pipeline_stage(__attribute__((unused)) void *args)
continue;

/* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
ring_state[port_id] = RING_OVERLOADED;

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 545ba42..8c8e311 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -43,6 +43,7 @@ enum ring_state {

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

extern uint8_t port_pairs[RTE_MAX_ETHPORTS];

diff --git a/examples/quota_watermark/qwctl/commands.c b/examples/quota_watermark/qwctl/commands.c
index 036bf80..5cac0e1 100644
--- a/examples/quota_watermark/qwctl/commands.c
+++ b/examples/quota_watermark/qwctl/commands.c
@@ -140,8 +140,8 @@ cmd_set_handler(__attribute__((unused)) void *parsed_result,
else
if (tokens->value >= *low_watermark * 100 / RING_SIZE
&& tokens->value <= 100)
- rte_ring_set_water_mark(ring,
- tokens->value * RING_SIZE / 100);
+ *high_watermark = tokens->value *
+ RING_SIZE / 100;
else
cmdline_printf(cl,
"ring high watermark must be between %u%% and 100%%\n",
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index 3a85cc3..7e7a396 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -55,6 +55,7 @@

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;


static void
@@ -68,6 +69,7 @@ setup_shared_variables(void)

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}

int main(int argc, char **argv)
diff --git a/examples/quota_watermark/qwctl/qwctl.h b/examples/quota_watermark/qwctl/qwctl.h
index 8d146e5..545914b 100644
--- a/examples/quota_watermark/qwctl/qwctl.h
+++ b/examples/quota_watermark/qwctl/qwctl.h
@@ -36,5 +36,6 @@

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

#endif /* _MAIN_H_ */
--
2.9.3
Bruce Richardson
2017-03-28 20:35:52 UTC
Permalink
This patchset makes a set of, sometimes non-backward compatible, cleanup
changes to the rte_ring code in order to improve it. The resulting code is
shorter, since the existing functions are restructured to reduce code
duplication, as well as being more consistent in behaviour. The specific
changes made are explained in each patch which makes that change.

Changes in V4:
* Fixed errors in the crypto PMDs due to missed updates to their
enqueue/dequeue calls (p8 & p9).
* Updated quota_watermarks documentation to match changed code (p10)

Changes in V3:
* removed anonymous union for sp_enqueue and sc_dequeue variables (p2)
* fixed parameter to dequeue function which was mistakenly named is_mp
instead of is_sc (as described in the comments) (p12)
* skipped unneeded check in tail pointer update fn for sp/sc operation (p13)

Changes in V2:
* Eliminated extra cacheline padding where cachelines are 128B
* Renamed rte_ring_ht_ptr struct to rte_ring_headtail
* Removed missed references to ring watermarks in test code and docs

This patchset is largely the same as that posted previously on-list as
an RFC:
http://dpdk.org/ml/archives/dev/2017-February/thread.html#56982

Changes in V1 from RFC:
* Included release notes updates as changes are made in each patch
* Fixed some missed comment updates when changing the code
* Separated some initial fixup patches from this set to send separately
* Dropped the final two patches for an rte_event_ring, as not relevant
for this set. That can be done as a separate set later.
* The macros for copying the pointers have an extra parameter added,
indicating the start of the ring buffer itself. This allows more
flexibility for reusing them in other ring implementations.

Bruce Richardson (14):
ring: remove split cacheline build setting
ring: create common structure for prod and cons metadata
ring: eliminate duplication of size and mask fields
ring: remove debug setting
ring: remove the yield when waiting for tail update
ring: remove watermark support
ring: make bulk and burst fn return vals consistent
ring: allow enqueue fns to return free space value
ring: allow dequeue fns to return remaining entry count
examples/quota_watermark: use ring space for watermarks
ring: reduce scope of local variables
ring: separate out head index manipulation for enq/deq
ring: create common function for updating tail idx
ring: make ring struct and enq/deq macros type agnostic

app/pdump/main.c | 2 +-
config/common_base | 3 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 -
doc/guides/prog_guide/ring_lib.rst | 15 -
doc/guides/prog_guide/writing_efficient_code.rst | 2 +-
doc/guides/rel_notes/release_17_05.rst | 33 +
doc/guides/sample_app_ug/quota_watermark.rst | 148 ++--
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 2 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 2 +-
drivers/crypto/armv8/rte_armv8_pmd.c | 8 +-
drivers/crypto/kasumi/rte_kasumi_pmd.c | 6 +-
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/crypto/openssl/rte_openssl_pmd.c | 2 +-
drivers/crypto/snow3g/rte_snow3g_pmd.c | 6 +-
drivers/crypto/zuc/rte_zuc_pmd.c | 4 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 4 +-
examples/distributor/main.c | 5 +-
examples/load_balancer/runtime.c | 34 +-
.../client_server_mp/mp_client/client.c | 9 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 13 +-
examples/qos_sched/app_thread.c | 14 +-
examples/quota_watermark/qw/init.c | 5 +-
examples/quota_watermark/qw/main.c | 21 +-
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 +-
examples/quota_watermark/qwctl/qwctl.c | 2 +
examples/quota_watermark/qwctl/qwctl.h | 1 +
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 5 +-
lib/librte_mempool/rte_mempool_ring.c | 12 +-
lib/librte_pdump/rte_pdump.c | 4 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 46 +-
lib/librte_ring/rte_ring.c | 76 +--
lib/librte_ring/rte_ring.h | 759 ++++++++-------------
test/test-pipeline/pipeline_hash.c | 5 +-
test/test-pipeline/runtime.c | 19 +-
test/test/autotest_test_funcs.py | 7 -
test/test/commands.c | 52 --
test/test/test_link_bonding_mode4.c | 6 +-
test/test/test_pmd_ring_perf.c | 12 +-
test/test/test_ring.c | 704 +++----------------
test/test/test_ring_perf.c | 36 +-
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 12 +-
test/test/virtual_pmd.c | 8 +-
52 files changed, 664 insertions(+), 1472 deletions(-)
--
2.9.3
Bruce Richardson
2017-03-28 20:35:53 UTC
Permalink
Users compiling DPDK should not need to know or care about the arrangement
of cachelines in the rte_ring structure. Therefore just remove the build
option and set the structures to be always split. On platforms with 64B
cachelines, use 128B rather than 64B alignment for improved performance,
since it stops the producer and consumer data from sharing adjacent cachelines.
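
As a rough illustration of the intent (a minimal standalone sketch, not DPDK
code; the 128B figure and field names are assumptions for a 64B cache-line
platform):

#include <stddef.h>
#include <stdint.h>
#include <stdalign.h>

struct headtail {
	volatile uint32_t head;
	volatile uint32_t tail;
};

struct ring_layout {
	/* With 64B lines and 128B alignment, the line after the producer's
	 * is never the consumer's, so adjacent-line hardware prefetching on
	 * one core does not keep pulling in the other core's line. */
	alignas(128) struct headtail prod;   /* offset 0 */
	alignas(128) struct headtail cons;   /* offset 128 */
};

_Static_assert(offsetof(struct ring_layout, cons) >= 2 * 64,
	       "prod and cons kept at least two 64B cache lines apart");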

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V2: Limit the cacheline * 2 alignment to platforms with < 128B line size
---
config/common_base | 1 -
doc/guides/rel_notes/release_17_05.rst | 7 +++++++
lib/librte_ring/rte_ring.c | 2 --
lib/librte_ring/rte_ring.h | 16 ++++++++++------
4 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/config/common_base b/config/common_base
index 37aa1e1..c394651 100644
--- a/config/common_base
+++ b/config/common_base
@@ -453,7 +453,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 2a045b3..8b66ac3 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -127,6 +127,13 @@ API Changes
* The LPM ``next_hop`` field is extended from 8 bits to 21 bits for IPv6
while keeping ABI compatibility.

+* **Reworked rte_ring library**
+
+ The rte_ring library has been reworked and updated. The following changes
+ have been made to it:
+
+ * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+
ABI Changes
-----------

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a108..4bc6da1 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,10 +127,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 72ccca5..399ae3b 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -139,6 +139,14 @@ struct rte_ring_debug_stats {

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

+#if RTE_CACHE_LINE_SIZE < 128
+#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#else
+#define PROD_ALIGN RTE_CACHE_LINE_SIZE
+#define CONS_ALIGN RTE_CACHE_LINE_SIZE
+#endif
+
/**
* An RTE ring structure.
*
@@ -168,7 +176,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Producer head. */
volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ } prod __rte_aligned(PROD_ALIGN);

/** Ring consumer status. */
struct cons {
@@ -177,11 +185,7 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
volatile uint32_t head; /**< Consumer head. */
volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
+ } cons __rte_aligned(CONS_ALIGN);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
--
2.9.3
Bruce Richardson
2017-03-28 20:35:54 UTC
Permalink
create a common structure to hold the metadata for the producer and
the consumer, since both need essentially the same information - the
head and tail values, the ring size and mask.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V3: removed union and replaced with "single" variable
V2: renamed the shared structure based on maintainer feedback.
---
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ring.c | 12 ++++++------
lib/librte_ring/rte_ring.c | 4 ++--
lib/librte_ring/rte_ring.h | 41 +++++++++++++++++++----------------------
4 files changed, 28 insertions(+), 31 deletions(-)

diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index a580a6a..cc0b5b1 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -740,7 +740,7 @@ pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
rte_errno = EINVAL;
return -1;
}
- if (ring->prod.sp_enqueue || ring->cons.sc_dequeue) {
+ if (ring->prod.single || ring->cons.single) {
RTE_LOG(ERR, PDUMP, "ring with either SP or SC settings"
" is not valid for pdump, should have MP and MC settings\n");
rte_errno = EINVAL;
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 3b9d3d0..0df1bcf 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -73,8 +73,8 @@ rte_port_ring_reader_create_internal(void *params, int socket_id,
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->cons.sc_dequeue && is_multi) ||
- (!(conf->ring->cons.sc_dequeue) && !is_multi)) {
+ (conf->ring->cons.single && is_multi) ||
+ (!(conf->ring->cons.single) && !is_multi)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
}
@@ -198,8 +198,8 @@ rte_port_ring_writer_create_internal(void *params, int socket_id,
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->prod.sp_enqueue && is_multi) ||
- (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
+ (conf->ring->prod.single && is_multi) ||
+ (!(conf->ring->prod.single) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
@@ -467,8 +467,8 @@ rte_port_ring_writer_nodrop_create_internal(void *params, int socket_id,
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->prod.sp_enqueue && is_multi) ||
- (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
+ (conf->ring->prod.single && is_multi) ||
+ (!(conf->ring->prod.single) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 4bc6da1..93a8692 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -145,8 +145,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
return -ENAMETOOLONG;
r->flags = flags;
r->prod.watermark = count;
- r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
- r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
+ r->prod.single = !!(flags & RING_F_SP_ENQ);
+ r->cons.single = !!(flags & RING_F_SC_DEQ);
r->prod.size = r->cons.size = count;
r->prod.mask = r->cons.mask = count-1;
r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 399ae3b..331c94f 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,6 +147,16 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif

+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_headtail {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t single; /**< True if single prod/cons */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */
+};
+
/**
* An RTE ring structure.
*
@@ -169,23 +179,10 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */

/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_aligned(PROD_ALIGN);
+ struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);

/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
- } cons __rte_aligned(CONS_ALIGN);
+ struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
@@ -837,7 +834,7 @@ static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue_bulk(r, obj_table, n);
else
return rte_ring_mp_enqueue_bulk(r, obj_table, n);
@@ -904,7 +901,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue(r, obj);
else
return rte_ring_mp_enqueue(r, obj);
@@ -975,7 +972,7 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue_bulk(r, obj_table, n);
else
return rte_ring_mc_dequeue_bulk(r, obj_table, n);
@@ -1039,7 +1036,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue(r, obj_p);
else
return rte_ring_mc_dequeue(r, obj_p);
@@ -1206,7 +1203,7 @@ static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue_burst(r, obj_table, n);
else
return rte_ring_mp_enqueue_burst(r, obj_table, n);
@@ -1274,7 +1271,7 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue_burst(r, obj_table, n);
else
return rte_ring_mc_dequeue_burst(r, obj_table, n);
--
2.9.3
Bruce Richardson
2017-03-28 20:35:55 UTC
Permalink
The size and mask fields are duplicated in both the producer and
consumer data structures. Move them out into the top-level structure
so they are not duplicated.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
lib/librte_ring/rte_ring.c | 20 ++++++++++----------
lib/librte_ring/rte_ring.h | 32 ++++++++++++++++----------------
test/test/test_ring.c | 6 +++---
3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 93a8692..93485d4 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -144,11 +144,11 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.watermark = count;
+ r->watermark = count;
r->prod.single = !!(flags & RING_F_SP_ENQ);
r->cons.single = !!(flags & RING_F_SC_DEQ);
- r->prod.size = r->cons.size = count;
- r->prod.mask = r->cons.mask = count-1;
+ r->size = count;
+ r->mask = count - 1;
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;

@@ -269,14 +269,14 @@ rte_ring_free(struct rte_ring *r)
int
rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
{
- if (count >= r->prod.size)
+ if (count >= r->size)
return -EINVAL;

/* if count is 0, disable the watermarking */
if (count == 0)
- count = r->prod.size;
+ count = r->size;

- r->prod.watermark = count;
+ r->watermark = count;
return 0;
}

@@ -291,17 +291,17 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)

fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->prod.watermark == r->prod.size)
+ if (r->watermark == r->size)
fprintf(f, " watermark=0\n");
else
- fprintf(f, " watermark=%"PRIu32"\n", r->prod.watermark);
+ fprintf(f, " watermark=%"PRIu32"\n", r->watermark);

/* sum and dump statistics */
#ifdef RTE_LIBRTE_RING_DEBUG
@@ -318,7 +318,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
}
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 331c94f..d650215 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -151,10 +151,7 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
struct rte_ring_headtail {
volatile uint32_t head; /**< Prod/consumer head. */
volatile uint32_t tail; /**< Prod/consumer tail. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
uint32_t single; /**< True if single prod/cons */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */
};

/**
@@ -174,9 +171,12 @@ struct rte_ring {
* next time the ABI changes
*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -355,7 +355,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
+ const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
@@ -382,7 +382,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+ const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
obj_table[i] = r->ring[idx]; \
@@ -437,7 +437,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -485,7 +485,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -544,7 +544,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;
int ret;

prod_head = r->prod.head;
@@ -580,7 +580,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
__RING_STAT_ADD(r, enq_quota, n);
@@ -630,7 +630,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
const unsigned max = n;
int success;
unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -727,7 +727,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
unsigned i;
- uint32_t mask = r->prod.mask;
+ uint32_t mask = r->mask;

cons_head = r->cons.head;
prod_tail = r->prod.tail;
@@ -1056,7 +1056,7 @@ rte_ring_full(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}

/**
@@ -1089,7 +1089,7 @@ rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}

/**
@@ -1105,7 +1105,7 @@ rte_ring_free_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
}

/**
@@ -1119,7 +1119,7 @@ rte_ring_free_count(const struct rte_ring *r)
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
- return r->prod.size;
+ return r->size;
}

/**
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index ebcb896..5f09097 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -148,7 +148,7 @@ check_live_watermark_change(__attribute__((unused)) void *dummy)
}

/* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->prod.watermark;
+ watermark = r->watermark;
if (watermark != watermark_old &&
(watermark_old != 16 || watermark != 32)) {
printf("Bad watermark change %u -> %u\n", watermark_old,
@@ -213,7 +213,7 @@ test_set_watermark( void ){
printf( " ring lookup failed\n" );
goto error;
}
- count = r->prod.size*2;
+ count = r->size * 2;
setwm = rte_ring_set_water_mark(r, count);
if (setwm != -EINVAL){
printf("Test failed to detect invalid watermark count value\n");
@@ -222,7 +222,7 @@ test_set_watermark( void ){

count = 0;
rte_ring_set_water_mark(r, count);
- if (r->prod.watermark != r->prod.size) {
+ if (r->watermark != r->size) {
printf("Test failed to detect invalid watermark count value\n");
goto error;
}
--
2.9.3
Bruce Richardson
2017-03-28 20:35:56 UTC
Permalink
The debug option only provided statistics to the user, most of
which could be tracked by the application itself. Remove it, both as a
compile-time option and as a feature, to simplify the code.
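
As the message notes, an application can keep equivalent counters itself; a
minimal sketch under that assumption (the struct and wrapper names are
illustrative, not part of the library):

#include <stdint.h>
#include <rte_ring.h>
#include <rte_lcore.h>
#include <rte_memory.h>

/* Per-lcore counters an application can keep in place of the removed
 * RTE_LIBRTE_RING_DEBUG statistics. */
struct app_ring_stats {
	uint64_t enq_calls;  /* enqueue attempts */
	uint64_t enq_objs;   /* objects actually stored */
	uint64_t enq_fail;   /* attempts that stored nothing */
} __rte_cache_aligned;

static struct app_ring_stats app_stats[RTE_MAX_LCORE];

static inline unsigned int
app_enqueue_burst(struct rte_ring *r, void **objs, unsigned int n)
{
	struct app_ring_stats *s = &app_stats[rte_lcore_id()];
	unsigned int done = rte_ring_enqueue_burst(r, objs, n, NULL);

	s->enq_calls++;
	s->enq_objs += done;
	if (done == 0)
		s->enq_fail++;
	return done;
}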

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
config/common_base | 1 -
doc/guides/prog_guide/ring_lib.rst | 7 -
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.c | 41 ----
lib/librte_ring/rte_ring.h | 97 +-------
test/test/test_ring.c | 410 ---------------------------------
6 files changed, 13 insertions(+), 544 deletions(-)

diff --git a/config/common_base b/config/common_base
index c394651..69e91ae 100644
--- a/config/common_base
+++ b/config/common_base
@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f69753..d4ab502 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,

This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.

-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 8b66ac3..50123c2 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -133,6 +133,7 @@ API Changes
have been made to it:

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+ * removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 93485d4..934ce87 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif

/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats sum;
- unsigned lcore_id;
-#endif
-
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " watermark=0\n");
else
fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
-
- /* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
- sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
- sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
- sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
- sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
- sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
- sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
- sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
- sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
- sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
- }
- fprintf(f, " size=%"PRIu32"\n", r->size);
- fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
- fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
- fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
- fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
- fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
- fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
- fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
- fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
- fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
- fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
- fprintf(f, " no statistics available\n");
-#endif
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index d650215..2777b41 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};

-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -184,10 +166,6 @@ struct rte_ring {
/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
* about compiler re-ordering */
@@ -199,27 +177,6 @@ struct rte_ring {
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
-/**
* Calculate the memory size needed for a ring
*
* This function returns the number of bytes needed for a ring, given
@@ -460,17 +417,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -485,15 +437,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

/*
* If there are other enqueues in progress that preceded us,
@@ -557,17 +505,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -580,15 +523,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

r->prod.tail = prod_next;
return ret;
@@ -652,16 +591,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

/* Set the actual entries for dequeue */
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -691,7 +625,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
sched_yield();
}
}
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -738,16 +671,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = prod_tail - cons_head;

if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -759,7 +687,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 5f09097..3891f5d 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
return -1;
}

-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
- printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
- return 0;
-#else
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
- unsigned num_items = 0;
- unsigned failed_enqueue_ops = 0;
- unsigned failed_enqueue_items = 0;
- unsigned failed_dequeue_ops = 0;
- unsigned failed_dequeue_items = 0;
- unsigned last_enqueue_ops = 0;
- unsigned last_enqueue_items = 0;
- unsigned last_quota_ops = 0;
- unsigned last_quota_items = 0;
- unsigned lcore_id = rte_lcore_id();
- struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
- printf("Test the ring stats.\n");
-
- /* Reset the watermark in case it was set in another test. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Allocate some dummy object pointers. */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
-
- /* Allocate some memory for copied objects. */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
- /* Set the head and tail pointers. */
- cur_src = src;
- cur_dst = dst;
-
- /* Do Enqueue tests. */
- printf("Test the dequeue stats.\n");
-
- /* Fill the ring up to RING_SIZE -1. */
- printf("Fill the ring.\n");
- for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
- rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- }
-
- /* Adjust for final enqueue = MAX_BULK -1. */
- cur_src--;
-
- printf("Verify that the ring is full.\n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
-
- printf("Verify the enqueue success stats.\n");
- /* Stats should match above enqueue operations to fill the ring. */
- if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Current max objects is RING_SIZE -1. */
- if (ring_stats->enq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any failures yet. */
- if (ring_stats->enq_fail_bulk != 0)
- goto fail;
- if (ring_stats->enq_fail_objs != 0)
- goto fail;
-
-
- printf("Test stats for SP burst enqueue to a full ring.\n");
- num_items = 2;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for SP bulk enqueue to a full ring.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP burst enqueue to a full ring.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP bulk enqueue to a full ring.\n");
- num_items = 16;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- /* Do Dequeue tests. */
- printf("Test the dequeue stats.\n");
-
- printf("Empty the ring.\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* There was only RING_SIZE -1 objects to dequeue. */
- cur_dst++;
-
- printf("Verify ring is empty.\n");
- if (1 != rte_ring_empty(r))
- goto fail;
-
- printf("Verify the dequeue success stats.\n");
- /* Stats should match above dequeue operations. */
- if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Objects dequeued is RING_SIZE -1. */
- if (ring_stats->deq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any dequeue failure stats yet. */
- if (ring_stats->deq_fail_bulk != 0)
- goto fail;
-
- printf("Test stats for SC burst dequeue with an empty ring.\n");
- num_items = 2;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for SC bulk dequeue with an empty ring.\n");
- num_items = 4;
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC burst dequeue with an empty ring.\n");
- num_items = 8;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC bulk dequeue with an empty ring.\n");
- num_items = 16;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test total enqueue/dequeue stats.\n");
- /* At this point the enqueue and dequeue stats should be the same. */
- if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
- goto fail;
- if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
- goto fail;
- if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
- goto fail;
- if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
- goto fail;
-
-
- /* Watermark Tests. */
- printf("Test the watermark/quota stats.\n");
-
- printf("Verify the initial watermark stats.\n");
- /* Watermark stats should be 0 since there is no watermark. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Set a watermark. */
- rte_ring_set_water_mark(r, 16);
-
- /* Reset pointers. */
- cur_src = src;
- cur_dst = dst;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue below watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should still be 0. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Success stats should have increased. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
- goto fail;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue at watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != 1)
- goto fail;
- if (ring_stats->enq_quota_objs != num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP burst enqueue above watermark.\n");
- num_items = 1;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP burst enqueue above watermark.\n");
- num_items = 2;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP bulk enqueue above watermark.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP bulk enqueue above watermark.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- printf("Test watermark success stats.\n");
- /* Success stats should be same as last non-watermarked enqueue. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items)
- goto fail;
-
-
- /* Cleanup. */
-
- /* Empty the ring. */
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* Reset the watermark. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
-fail:
- free(src);
- free(dst);
- return -1;
-#endif
-}
-
/*
* it will always fail to create ring with a wrong ring size number in this function
*/
@@ -1335,10 +929,6 @@ test_ring(void)
if (test_ring_basic() < 0)
return -1;

- /* ring stats */
- if (test_ring_stats() < 0)
- return -1;
-
/* basic operations */
if (test_live_watermark_change() < 0)
return -1;
--
2.9.3
Bruce Richardson
2017-03-28 20:35:57 UTC
Permalink
There was a compile-time setting to make a ring yield while spinning in
the multi-producer or multi-consumer paths, waiting for a preceding
thread's tail pointer update. Build-time settings are not recommended for
enabling or disabling features, and since this one was off by default,
remove it completely. If needed, a runtime-enabled equivalent can be added
later.
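
As a rough, non-authoritative sketch of such a runtime equivalent, the loop
below mirrors the removed behaviour with the repeat count set at run time;
pause_rep_count and wait_for_tail() are purely hypothetical names, and the
header providing rte_pause() varies between DPDK versions:

#include <sched.h>
#include <stdint.h>

static unsigned int pause_rep_count;	/* set at run time; 0 = never yield */

/* Hypothetical spin-wait helper: pause on each iteration and yield the CPU
 * after pause_rep_count repetitions, giving a preempted peer thread a
 * chance to finish its ring enqueue/dequeue operation. */
static inline void
wait_for_tail(volatile uint32_t *tail, uint32_t expected)
{
	unsigned int rep = 0;

	while (*tail != expected) {
		rte_pause();
		if (pause_rep_count != 0 && ++rep == pause_rep_count) {
			rep = 0;
			sched_yield();
		}
	}
}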

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
config/common_base | 1 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 ----
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.h | 35 +++++--------------------
4 files changed, 7 insertions(+), 35 deletions(-)

diff --git a/config/common_base b/config/common_base
index 69e91ae..2d54ddf 100644
--- a/config/common_base
+++ b/config/common_base
@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
# Compile librte_mempool
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 10a10a8..7c39cd2 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -352,11 +352,6 @@ Known Issues

3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.

- ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
- It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
- This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
+ rte_timer

Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 50123c2..25d8549 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -134,6 +134,7 @@ API Changes

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
+ * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 2777b41..f8ac7f5 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -114,11 +114,6 @@ enum rte_ring_queue_behavior {
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)

-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
-#endif
-
struct rte_memzone; /* forward declaration, so as not to require memzone.h */

#if RTE_CACHE_LINE_SIZE < 128
@@ -393,7 +388,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -447,18 +442,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head)) {
+ while (unlikely(r->prod.tail != prod_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->prod.tail = prod_next;
return ret;
}
@@ -491,7 +477,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -568,7 +554,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -613,18 +599,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head)) {
+ while (unlikely(r->cons.tail != cons_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -659,7 +636,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-03-28 20:35:58 UTC
Permalink
Remove the watermark support. A later commit in this series will have the
enqueue functions return the amount of free space in the ring, which
allows applications to implement their own watermark checks and is more
generally useful to them.
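
Purely as an illustration of the direction this points applications in, a
back-pressure check built on the existing occupancy helpers might look like
the sketch below; HIGH_WATERMARK and pause_io() are hypothetical,
application-defined names, not DPDK API:

#include <rte_ring.h>

#define HIGH_WATERMARK 896	/* e.g. 7/8 of a 1024-entry ring; app-defined */

/* Hypothetical replacement for the removed watermark notification: check
 * ring occupancy after enqueuing and apply back-pressure when it is high. */
static inline void
check_backpressure(struct rte_ring *r)
{
	if (rte_ring_count(r) >= HIGH_WATERMARK)
		pause_io();	/* hypothetical, e.g. request an Ethernet PAUSE */
}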

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V2: fix missed references to watermarks in v1
---
doc/guides/prog_guide/ring_lib.rst | 8 --
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
test/test/autotest_test_funcs.py | 7 --
test/test/commands.c | 52 ------------
test/test/test_ring.c | 149 +--------------------------------
8 files changed, 8 insertions(+), 293 deletions(-)

diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index d4ab502..b31ab7a 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -102,14 +102,6 @@ Name
A ring is identified by a unique name.
It is not possible to create two rings with the same name (rte_ring_create() returns NULL if this is attempted).

-Water Marking
-~~~~~~~~~~~~~
-
-The ring can have a high water mark (threshold).
-Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
-
-This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 25d8549..084b359 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -135,6 +135,8 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
+ * removed the function ``rte_ring_set_water_mark`` as part of a general
+ removal of watermarks support in the library.

ABI Changes
-----------
diff --git a/examples/Makefile b/examples/Makefile
index da2bfdd..19cd5ad 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-DIRS-y += quota_watermark
+#DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 934ce87..25f64f0 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,7 +138,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->watermark = count;
r->prod.single = !!(flags & RING_F_SP_ENQ);
r->cons.single = !!(flags & RING_F_SC_DEQ);
r->size = count;
@@ -256,24 +255,6 @@ rte_ring_free(struct rte_ring *r)
rte_free(te);
}

-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
-{
- if (count >= r->size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->size;
-
- r->watermark = count;
- return 0;
-}
-
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
@@ -287,10 +268,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->watermark == r->size)
- fprintf(f, " watermark=0\n");
- else
- fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index f8ac7f5..906e8ae 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -153,7 +153,6 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -168,7 +167,6 @@ struct rte_ring {

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
@@ -274,26 +272,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);

/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -374,8 +352,6 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -390,7 +366,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
int success;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -431,13 +406,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
/*
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
@@ -446,7 +414,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -465,8 +433,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -479,7 +445,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_next, free_entries;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

prod_head = r->prod.head;
cons_tail = r->cons.tail;
@@ -508,15 +473,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -682,8 +640,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -704,8 +660,6 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -730,8 +684,6 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -756,8 +708,6 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -775,8 +725,6 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -798,8 +746,6 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
diff --git a/test/test/autotest_test_funcs.py b/test/test/autotest_test_funcs.py
index 1c5f390..8da8fcd 100644
--- a/test/test/autotest_test_funcs.py
+++ b/test/test/autotest_test_funcs.py
@@ -292,11 +292,4 @@ def ring_autotest(child, test_name):
elif index == 2:
return -1, "Fail [Timeout]"

- child.sendline("set_watermark test 100")
- child.sendline("dump_ring test")
- index = child.expect([" watermark=100",
- pexpect.TIMEOUT], timeout=1)
- if index != 0:
- return -1, "Fail [Bad watermark]"
-
return 0, "Success"
diff --git a/test/test/commands.c b/test/test/commands.c
index 2df46b0..551c81d 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -228,57 +228,6 @@ cmdline_parse_inst_t cmd_dump_one = {

/****************/

-struct cmd_set_ring_result {
- cmdline_fixed_string_t set;
- cmdline_fixed_string_t name;
- uint32_t value;
-};
-
-static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_ring_result *res = parsed_result;
- struct rte_ring *r;
- int ret;
-
- r = rte_ring_lookup(res->name);
- if (r == NULL) {
- cmdline_printf(cl, "Cannot find ring\n");
- return;
- }
-
- if (!strcmp(res->set, "set_watermark")) {
- ret = rte_ring_set_water_mark(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set water mark\n");
- }
-}
-
-cmdline_parse_token_string_t cmd_set_ring_set =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_watermark");
-
-cmdline_parse_token_string_t cmd_set_ring_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
-
-cmdline_parse_token_num_t cmd_set_ring_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);
-
-cmdline_parse_inst_t cmd_set_ring = {
- .f = cmd_set_ring_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "set watermark: "
- "set_watermark <ring_name> <value>",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_set_ring_set,
- (void *)&cmd_set_ring_name,
- (void *)&cmd_set_ring_value,
- NULL,
- },
-};
-
-/****************/
-
struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
@@ -419,7 +368,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_autotest,
(cmdline_parse_inst_t *)&cmd_dump,
(cmdline_parse_inst_t *)&cmd_dump_one,
- (cmdline_parse_inst_t *)&cmd_set_ring,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_set_rxtx,
(cmdline_parse_inst_t *)&cmd_set_rxtx_anchor,
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 3891f5d..666a451 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -78,21 +78,6 @@
* - Dequeue one object, two objects, MAX_BULK objects
* - Check that dequeued pointers are correct
*
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check live watermark change
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of watermark.
- * - At the same time, change the watermark on the master lcore.
- * - The slave lcore will check that watermark changes from 16 to 32.
- *
* #. Performance tests.
*
* Tests done in test_ring_perf.c
@@ -115,123 +100,6 @@ static struct rte_ring *r;

#define TEST_RING_FULL_EMTPY_ITER 8

-static int
-check_live_watermark_change(__attribute__((unused)) void *dummy)
-{
- uint64_t hz = rte_get_timer_hz();
- void *obj_table[MAX_BULK];
- unsigned watermark, watermark_old = 16;
- uint64_t cur_time, end_time;
- int64_t diff = 0;
- int i, ret;
- unsigned count = 4;
-
- /* init the object table */
- memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz / 4);
-
- /* check that bulk and watermark are 4 and 32 (respectively) */
- while (diff >= 0) {
-
- /* add in ring until we reach watermark */
- ret = 0;
- for (i = 0; i < 16; i ++) {
- if (ret != 0)
- break;
- ret = rte_ring_enqueue_bulk(r, obj_table, count);
- }
-
- if (ret != -EDQUOT) {
- printf("Cannot enqueue objects, or watermark not "
- "reached (ret=%d)\n", ret);
- return -1;
- }
-
- /* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->watermark;
- if (watermark != watermark_old &&
- (watermark_old != 16 || watermark != 32)) {
- printf("Bad watermark change %u -> %u\n", watermark_old,
- watermark);
- return -1;
- }
- watermark_old = watermark;
-
- /* dequeue objects from ring */
- while (i--) {
- ret = rte_ring_dequeue_bulk(r, obj_table, count);
- if (ret != 0) {
- printf("Cannot dequeue (ret=%d)\n", ret);
- return -1;
- }
- }
-
- cur_time = rte_get_timer_cycles();
- diff = end_time - cur_time;
- }
-
- if (watermark_old != 32 ) {
- printf(" watermark was not updated (wm=%u)\n",
- watermark_old);
- return -1;
- }
-
- return 0;
-}
-
-static int
-test_live_watermark_change(void)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
-
- printf("Test watermark live modification\n");
- rte_ring_set_water_mark(r, 16);
-
- /* launch a thread that will enqueue and dequeue, checking
- * watermark and quota */
- rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
-
- rte_delay_ms(100);
- rte_ring_set_water_mark(r, 32);
- rte_delay_ms(100);
-
- if (rte_eal_wait_lcore(lcore_id2) < 0)
- return -1;
-
- return 0;
-}
-
-/* Test for catch on invalid watermark values */
-static int
-test_set_watermark( void ){
- unsigned count;
- int setwm;
-
- struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
- if(r == NULL){
- printf( " ring lookup failed\n" );
- goto error;
- }
- count = r->size * 2;
- setwm = rte_ring_set_water_mark(r, count);
- if (setwm != -EINVAL){
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
-
- count = 0;
- rte_ring_set_water_mark(r, count);
- if (r->watermark != r->size) {
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
- return 0;
-
-error:
- return -1;
-}
-
/*
* helper routine for test_ring_basic
*/
@@ -418,8 +286,7 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- printf("test watermark and default bulk enqueue / dequeue\n");
- rte_ring_set_water_mark(r, 20);
+ printf("test default bulk enqueue / dequeue\n");
num_elems = 16;

cur_src = src;
@@ -433,8 +300,8 @@ test_ring_basic(void)
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != -EDQUOT) {
- printf("Watermark not exceeded\n");
+ if (ret != 0) {
+ printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
@@ -930,16 +797,6 @@ test_ring(void)
return -1;

/* basic operations */
- if (test_live_watermark_change() < 0)
- return -1;
-
- if ( test_set_watermark() < 0){
- printf ("Test failed to detect invalid parameter\n");
- return -1;
- }
- else
- printf ( "Test detected forced bad watermark values\n");
-
if ( test_create_count_odd() < 0){
printf ("Test failed to detect odd count\n");
return -1;
--
2.9.3
Bruce Richardson
2017-03-28 20:35:59 UTC
Permalink
The ring bulk functions return 0 when all elements are enqueued and a
negative errno when there is no space. Change them to be consistent with
the burst functions by returning the number of elements enqueued or
dequeued, i.e. 0 or N. This also allows the return value from
enqueue/dequeue to be used directly, without a branch for error checking.
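
A minimal sketch of how caller code changes under the new convention is
shown below; handle_drop() and the stats counter are illustrative only, and
a later patch in this series adds a further free-space parameter to these
calls:

/* Old convention (sketch): 0 on success, -ENOBUFS when the ring is full. */
if (rte_ring_sp_enqueue_bulk(r, objs, n) != 0)
	handle_drop(objs, n);			/* hypothetical error handler */

/* New convention: returns the number of objects enqueued, either 0 or n,
 * so the result can feed a counter directly without an error branch. */
unsigned int enqueued = rte_ring_sp_enqueue_bulk(r, objs, n);
stats.enqueued += enqueued;			/* hypothetical counter */
if (enqueued == 0)
	handle_drop(objs, n);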

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
doc/guides/rel_notes/release_17_05.rst | 11 +++
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
examples/load_balancer/runtime.c | 16 ++-
.../client_server_mp/mp_client/client.c | 8 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/qos_sched/app_thread.c | 8 +-
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 12 ++-
lib/librte_ring/rte_ring.h | 109 +++++++--------------
test/test-pipeline/pipeline_hash.c | 2 +-
test/test-pipeline/runtime.c | 8 +-
test/test/test_ring.c | 46 +++++----
test/test/test_ring_perf.c | 8 +-
14 files changed, 106 insertions(+), 130 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 084b359..6da2612 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -137,6 +137,17 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * changed the return value of the enqueue and dequeue bulk functions to
+ match that of the burst equivalents. In all cases, ring functions which
+ operate on multiple packets now return the number of elements enqueued
+ or dequeued, as appropriate. The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``

ABI Changes
-----------
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index 9b69cfe..e3a63c8 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325..82b10bc 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (
(void **) lp->rx.mbuf_out[worker].array,
bsz);

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -349,9 +349,8 @@ app_lcore_io_tx(
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

n_mbufs += bsz_rd;

@@ -505,9 +504,8 @@ app_lcore_worker(
(void **) lp->mbuf_in.array,
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -559,7 +557,7 @@ app_lcore_worker(

#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +570,7 @@ app_lcore_worker(
}
#endif

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca3..dca9eb9 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,10 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");

for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;

- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d..19c95b2 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb..dab4594 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -180,7 +180,7 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

conf->counter = 0; /* reset empty read loop counter */
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt) == 0)
+ ; /* empty body */

conf_idx++;
if (confs[conf_idx] == NULL)
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index a6c0c70..9ec6a05 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) != 0))
+ rx_pkts) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 1a54d1b..3eb7fac 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..409b860 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -42,26 +42,30 @@ static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 906e8ae..34b438c 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -349,14 +349,10 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -388,7 +384,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -414,7 +410,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -430,14 +426,10 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -457,7 +449,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -474,7 +466,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -495,16 +487,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/

-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -536,7 +523,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* Set the actual entries for dequeue */
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -562,7 +549,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

r->cons.tail = cons_next;

- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -580,15 +567,10 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -607,7 +589,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,

if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -623,7 +605,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -639,10 +621,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -659,10 +640,9 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -683,10 +663,9 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -713,7 +692,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -730,7 +709,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -751,10 +730,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -770,11 +746,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -791,11 +765,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -815,11 +787,9 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
if (r->cons.single)
@@ -846,7 +816,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -864,7 +834,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -886,10 +856,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 10d2869..1ac0aa8 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -547,6 +547,6 @@ app_main_loop_rx_metadata(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 42a6142..4e20669 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -98,7 +98,7 @@ app_main_loop_rx(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -123,7 +123,7 @@ app_main_loop_worker(void) {
(void **) worker_mbuf->array,
app.burst_size_worker_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

do {
@@ -131,7 +131,7 @@ app_main_loop_worker(void) {
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
app.burst_size_worker_write);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -152,7 +152,7 @@ app_main_loop_tx(void) {
(void **) &app.mbuf_tx[i].array[n_mbufs],
app.burst_size_tx_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

n_mbufs += app.burst_size_tx_read;
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 666a451..112433b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,20 +117,18 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -171,37 +169,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -217,37 +215,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -264,11 +262,11 @@ test_ring_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
}

@@ -294,25 +292,25 @@ test_ring_basic(void)

ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
}
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 320c20c..8ccbdef 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();
--
2.9.3
Bruce Richardson
2017-03-28 20:36:00 UTC
Permalink
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space left in
the ring. Applications can use this information in a number of ways: with
single-producer rings, for instance, it gives a maximum enqueue size that
is guaranteed to succeed. It can also be used to implement watermark
functionality in the application, replacing the removed library support
with a more flexible version that allows multiple watermark thresholds
rather than just one.
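
As an illustrative sketch only, an application-level scheme with two
congestion thresholds could use the new output parameter as below;
RING_SIZE, the threshold fractions and the drop_packets()/notify_*()
handlers are hypothetical, application-defined names:

unsigned int free_space;
unsigned int sent = rte_ring_enqueue_burst(r, (void **)pkts, n, &free_space);

if (sent < n)
	drop_packets(&pkts[sent], n - sent);	/* hypothetical drop handler */

/* Two app-defined watermark levels, checked from the returned free space. */
if (free_space < RING_SIZE / 8)
	notify_critical();	/* e.g. shed low-priority traffic */
else if (free_space < RING_SIZE / 4)
	notify_warning();	/* e.g. signal back-pressure upstream */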

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V4: Added missing updates to crypto PMDs
---
doc/guides/rel_notes/release_17_05.rst | 3 +
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
drivers/crypto/armv8/rte_armv8_pmd.c | 6 +-
drivers/crypto/kasumi/rte_kasumi_pmd.c | 4 +-
drivers/crypto/snow3g/rte_snow3g_pmd.c | 4 +-
drivers/crypto/zuc/rte_zuc_pmd.c | 2 +-
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 3 +-
examples/load_balancer/runtime.c | 12 ++-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 7 +-
examples/qos_sched/app_thread.c | 4 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 28 ++++---
lib/librte_ring/rte_ring.h | 89 +++++++++++-----------
test/test-pipeline/pipeline_hash.c | 3 +-
test/test-pipeline/runtime.c | 5 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 5 +-
test/test/test_ring.c | 55 ++++++-------
test/test/test_ring_perf.c | 16 ++--
test/test/test_table_ports.c | 4 +-
test/test/virtual_pmd.c | 4 +-
27 files changed, 149 insertions(+), 126 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 6da2612..b361a98 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -137,6 +137,9 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * added an extra parameter to the burst/bulk enqueue functions to
+ return the number of free spaces in the ring after enqueue. This can
+ be used by an application to implement its own watermark functionality.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index e3a63c8..c2a5f20 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index d2b88a3..37ecd7b 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -739,13 +739,15 @@ armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
goto enqueue_err;
}

- retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i);
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
qp->stats.enqueued_count += retval;

return retval;

enqueue_err:
- retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i);
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
if (ops[i] != NULL)
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;

diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 234921e..1dd05cb 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -359,7 +359,7 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
}

enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;

@@ -410,7 +410,7 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
}

enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
- processed_op);
+ processed_op, NULL);
qp->qp_stats.enqueued_count += enqueued_op;
*accumulated_enqueued_ops += enqueued_op;

diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index ca97271..01c4e1c 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -363,7 +363,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
}

enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;

@@ -414,7 +414,7 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
}

enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)&op, processed_op);
+ (void **)&op, processed_op, NULL);
qp->qp_stats.enqueued_count += enqueued_op;
*accumulated_enqueued_ops += enqueued_op;

diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 6f9c06a..5e2dbf5 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
}

enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 6f9cc1a..adbf478 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -102,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 7b8a759..bb84f13 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -238,7 +238,8 @@ lcore_rx(struct lcore_params *p)
continue;
}

- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs,
+ nb_ret, NULL);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 82b10bc..1645994 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,7 +144,8 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -310,7 +311,8 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -553,7 +555,8 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);

#if APP_STATS
lp->rings_out_iters[port] ++;
@@ -605,7 +608,8 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 19c95b2..c2b0261 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) == 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index a448039..569b6da 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;

/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;

/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index dab4594..0c81a15 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) == 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -231,7 +231,7 @@ app_worker_thread(struct thread_conf **confs)
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
- (void **)mbufs, nb_pkt) == 0)
+ (void **)mbufs, nb_pkt, NULL) == 0)
; /* empty body */

conf_idx++;
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 3eb7fac..597b4c2 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 51db006..6552199 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -808,7 +808,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
/* Need to enqueue the free slots in global ring. */
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
- LCORE_CACHE_SIZE);
+ LCORE_CACHE_SIZE, NULL);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 409b860..9b8fd2b 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -43,7 +43,7 @@ common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_mp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
@@ -51,7 +51,7 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_sp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index cc0b5b1..b599d65 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -197,7 +197,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
dup_bufs[d_pkts++] = p;
}

- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
if (unlikely(ring_enq < d_pkts)) {
RTE_LOG(DEBUG, PDUMP,
"only %d of packets enqueued to ring\n", ring_enq);
diff --git a/lib/librte_port/rte_port_ras.c b/lib/librte_port/rte_port_ras.c
index c4bb508..4de0945 100644
--- a/lib/librte_port/rte_port_ras.c
+++ b/lib/librte_port/rte_port_ras.c
@@ -167,7 +167,7 @@ send_burst(struct rte_port_ring_writer_ras *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 0df1bcf..c5dbe07 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -241,7 +241,7 @@ send_burst(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -256,7 +256,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -318,11 +318,11 @@ rte_port_ring_writer_tx_bulk_internal(void *port,

RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
- n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
@@ -517,7 +517,7 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -527,7 +527,8 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_sp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -550,7 +551,7 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -560,7 +561,8 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_mp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -633,10 +635,12 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
n_pkts_ok =
- rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
n_pkts_ok =
- rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

if (n_pkts_ok >= n_pkts)
return 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 34b438c..61a4dc8 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -354,20 +354,16 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const unsigned int max = n;
int success;
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move prod.head atomically */
do {
/* Reset n to the initial burst count */
@@ -382,16 +378,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = (mask + cons_tail - prod_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : free_entries;
+
+ if (n == 0)
+ goto end;

prod_next = prod_head + n;
success = rte_atomic32_cmpset(&r->prod.head, prod_head,
@@ -410,6 +402,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -431,7 +426,8 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
@@ -447,16 +443,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = mask + cons_tail - prod_head;

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+
+ if (n == 0)
+ goto end;
+

prod_next = prod_head + n;
r->prod.head = prod_next;
@@ -466,6 +458,9 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -625,9 +620,10 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -644,9 +640,10 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -667,12 +664,12 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.single)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
}

/**
@@ -692,7 +689,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -709,7 +706,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -730,7 +727,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -976,9 +973,10 @@ struct rte_ring *rte_ring_lookup(const char *name);
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -995,9 +993,10 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -1018,12 +1017,12 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.single)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 1ac0aa8..0c6e04f 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -546,7 +546,8 @@ app_main_loop_rx_metadata(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 4e20669..c06ff54 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -97,7 +97,7 @@ app_main_loop_rx(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs, NULL);
} while (ret == 0);
}
}
@@ -130,7 +130,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
- app.burst_size_worker_write);
+ app.burst_size_worker_write,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 53caa3e..8df28b4 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -206,7 +206,8 @@ slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
static int
slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, size);
+ return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf,
+ size, NULL);
}

static uint16_t
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index af011f7..045a7f2 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -98,7 +98,7 @@ test_single_enqueue_dequeue(void)
const uint64_t sc_start = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
- rte_ring_enqueue_bulk(r, &burst, 1);
+ rte_ring_enqueue_bulk(r, &burst, 1, NULL);
rte_ring_dequeue_bulk(r, &burst, 1);
}
const uint64_t sc_end = rte_rdtsc_precise();
@@ -131,7 +131,8 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 112433b..b0ca88b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,11 +117,12 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
+ NULL) != 0);
TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
@@ -167,19 +168,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -213,19 +214,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -260,7 +261,7 @@ test_ring_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -290,13 +291,13 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
@@ -371,19 +372,19 @@ test_ring_burst_basic(void)

printf("Test SP & SC basic functions \n");
printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -419,7 +420,7 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
goto fail;
@@ -427,14 +428,14 @@ test_ring_burst_basic(void)
}

printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
/* Always one free entry left */
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -444,7 +445,7 @@ test_ring_burst_basic(void)
goto fail;

printf("Test enqueue for a full entry \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;

@@ -486,19 +487,19 @@ test_ring_burst_basic(void)
printf("Test MP & MC basic functions \n");

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -534,7 +535,7 @@ test_ring_burst_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -557,19 +558,19 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK objects */
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -607,7 +608,7 @@ test_ring_burst_basic(void)

printf("Covering rte_ring_enqueue_burst functions \n");

- ret = rte_ring_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
@@ -746,7 +747,7 @@ test_ring_basic_ex(void)
}

/* Covering the ring burst operation */
- ret = rte_ring_enqueue_burst(rp, obj, 2);
+ ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
if ((ret & RTE_RING_SZ_MASK) != 2) {
printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 8ccbdef..f95a8e9 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -323,14 +323,16 @@ test_burst_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
@@ -357,14 +359,16 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 2532367..395f4f3 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -80,7 +80,7 @@ test_port_ring_reader(void)
mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- mbuf, 1);
+ mbuf, 1, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

if (received_pkts < expected_pkts)
@@ -93,7 +93,7 @@ test_port_ring_reader(void)
mbuf[i] = rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
RTE_PORT_IN_BURST_SIZE_MAX);

diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 6e4dcd8..39e070c 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -380,7 +380,7 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
nb_pkts = 0;
else
nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increment opacket count */
dev_private->eth_stats.opackets += nb_pkts;
@@ -496,7 +496,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
vrtl_eth_dev->data->dev_private;

return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

int
--
2.9.3
Bruce Richardson
2017-03-28 20:36:01 UTC
Permalink
Add an extra parameter to the ring dequeue burst/bulk functions so that
those functions can optionally return the number of objects remaining in
the ring. This information can be used by applications in a number of
ways; for instance, with single-consumer queues it provides a maximum
dequeue size which is guaranteed to succeed.
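
As a hedged illustration (not part of this patch), a single-consumer
drain loop could use the new output parameter to know how much more it
can safely dequeue; the function name and the processing step are
placeholders:

#include <rte_ring.h>

/*
 * Drain a single-consumer ring in bursts. The 'available' output of
 * each dequeue reports how many objects are still left, so with a
 * single consumer the next dequeue of up to that many objects is
 * guaranteed to succeed without re-reading the ring state.
 */
static void
app_drain_ring(struct rte_ring *r, void **objs, unsigned int burst)
{
	unsigned int avail = 0;

	do {
		unsigned int n = rte_ring_sc_dequeue_burst(r, objs, burst,
				&avail);
		if (n == 0)
			break;
		/* ... process the n dequeued objects here ... */
	} while (avail > 0);
}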

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
v4: added missing updates to crypto PMDs
---
app/pdump/main.c | 2 +-
doc/guides/prog_guide/writing_efficient_code.rst | 2 +-
doc/guides/rel_notes/release_17_05.rst | 8 ++
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 2 +-
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 2 +-
drivers/crypto/armv8/rte_armv8_pmd.c | 2 +-
drivers/crypto/kasumi/rte_kasumi_pmd.c | 2 +-
drivers/crypto/null/null_crypto_pmd.c | 2 +-
drivers/crypto/openssl/rte_openssl_pmd.c | 2 +-
drivers/crypto/snow3g/rte_snow3g_pmd.c | 2 +-
drivers/crypto/zuc/rte_zuc_pmd.c | 2 +-
drivers/net/bonding/rte_eth_bond_pmd.c | 3 +-
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 2 +-
examples/load_balancer/runtime.c | 6 +-
.../client_server_mp/mp_client/client.c | 3 +-
examples/packet_ordering/main.c | 6 +-
examples/qos_sched/app_thread.c | 6 +-
examples/quota_watermark/qw/main.c | 5 +-
examples/server_node_efd/node/node.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 3 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_port/rte_port_frag.c | 3 +-
lib/librte_port/rte_port_ring.c | 6 +-
lib/librte_ring/rte_ring.h | 90 +++++++++++-----------
test/test-pipeline/runtime.c | 6 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 7 +-
test/test/test_ring.c | 54 ++++++-------
test/test/test_ring_perf.c | 20 +++--
test/test/test_table_acl.c | 2 +-
test/test/test_table_pipeline.c | 2 +-
test/test/test_table_ports.c | 8 +-
test/test/virtual_pmd.c | 4 +-
34 files changed, 153 insertions(+), 122 deletions(-)

diff --git a/app/pdump/main.c b/app/pdump/main.c
index b88090d..3b13753 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -496,7 +496,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)

/* first dequeue packets from ring of primary process */
const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
- (void *)rxtx_bufs, BURST_SIZE);
+ (void *)rxtx_bufs, BURST_SIZE, NULL);
stats->dequeue_pkts += nb_in_deq;

if (nb_in_deq) {
diff --git a/doc/guides/prog_guide/writing_efficient_code.rst b/doc/guides/prog_guide/writing_efficient_code.rst
index 78d2afa..8223ace 100644
--- a/doc/guides/prog_guide/writing_efficient_code.rst
+++ b/doc/guides/prog_guide/writing_efficient_code.rst
@@ -124,7 +124,7 @@ The code algorithm that dequeues messages may be something similar to the follow

while (1) {
/* Process as many elements as can be dequeued. */
- count = rte_ring_dequeue_burst(ring, obj_table, MAX_BULK);
+ count = rte_ring_dequeue_burst(ring, obj_table, MAX_BULK, NULL);
if (unlikely(count == 0))
continue;

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index b361a98..c67e468 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -140,6 +140,8 @@ API Changes
* added an extra parameter to the burst/bulk enqueue functions to
return the number of free spaces in the ring after enqueue. This can
be used by an application to implement its own watermark functionality.
+ * added an extra parameter to the burst/bulk dequeue functions to return
+ the number of elements remaining in the ring after dequeue.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
@@ -152,6 +154,12 @@ API Changes
- ``rte_ring_sc_dequeue_bulk``
- ``rte_ring_dequeue_bulk``

+ NOTE: the above functions all have different parameters as well as
+ different return values, due to the other listed changes above. This
+ means that all instances of the functions in existing code will be
+ flagged by the compiler. The return value usage should be checked
+ while fixing the compiler error due to the extra parameter.
+
ABI Changes
-----------

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index a2d10a5..638a95d 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -420,7 +420,7 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 432d239..05edb6c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -622,7 +622,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 37ecd7b..6376e9e 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -765,7 +765,7 @@ armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned int nb_dequeued = 0;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 1dd05cb..55bdb29 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -542,7 +542,7 @@ kasumi_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index ed5a9fc..f68ec8d 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -155,7 +155,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index e74c5cf..09173b2 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1119,7 +1119,7 @@ openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned int nb_dequeued = 0;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 01c4e1c..1042b31 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -533,7 +533,7 @@ snow3g_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 5e2dbf5..06ff503 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -433,7 +433,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;

nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;

return nb_dequeued;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index f3ac9e2..96638af 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1008,7 +1008,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct port *port = &mode_8023ad_ports[slaves[i]];

slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];

for (j = 0; j < slave_slow_nb_pkts[i]; j++)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index adbf478..77ef3a1 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -88,7 +88,7 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index bb84f13..90c9613 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -330,7 +330,7 @@ lcore_tx(struct rte_ring *in_r)

struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE);
+ (void *)bufs, BURST_SIZE, NULL);
app_stats.tx.dequeue_pkts += nb_rx;

/* if we get no traffic, flush anything we have */
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 1645994..8192c08 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -349,7 +349,8 @@ app_lcore_io_tx(
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
@@ -504,7 +505,8 @@ app_lcore_worker(
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);

if (unlikely(ret == 0))
continue;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index dca9eb9..01b535c 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -279,7 +279,8 @@ main(int argc, char *argv[])
uint16_t i, rx_pkts;
uint8_t port;

- rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 569b6da..49ae35b 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)

/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;

@@ -510,7 +510,7 @@ send_thread(struct send_thread_args *args)

/* deque the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(nb_dq_mbufs == 0))
continue;
@@ -595,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)

/* deque the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);

if (unlikely(dqnum == 0))
continue;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 0c81a15..15f117f 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -179,7 +179,7 @@ app_tx_thread(struct thread_conf **confs)

while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
+ burst_conf.qos_dequeue, NULL);
if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

@@ -218,7 +218,7 @@ app_worker_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
@@ -254,7 +254,7 @@ app_mixed_thread(struct thread_conf **confs)

/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 57df8ef..2dcddea 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -247,7 +247,8 @@ pipeline_stage(__attribute__((unused)) void *args)
}

/* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
if (unlikely(nb_dq_pkts < 0))
continue;

@@ -305,7 +306,7 @@ send_stage(__attribute__((unused)) void *args)

/* Dequeue packets from tx and send them */
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
- (void *) tx_pkts, *quota);
+ (void *) tx_pkts, *quota, NULL);
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);

/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 9ec6a05..f780b92 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) == 0))
+ rx_pkts, NULL) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6552199..645c0cf 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -536,7 +536,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
- cached_free_slots->objs, LCORE_CACHE_SIZE);
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE, NULL);
if (n_slots == 0)
return -ENOSPC;

diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 9b8fd2b..5c132bf 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -58,14 +58,14 @@ static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_mc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
return rte_ring_sc_dequeue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_port/rte_port_frag.c b/lib/librte_port/rte_port_frag.c
index 0fcace9..320407e 100644
--- a/lib/librte_port/rte_port_frag.c
+++ b/lib/librte_port/rte_port_frag.c
@@ -186,7 +186,8 @@ rte_port_ring_reader_frag_rx(void *port,
/* If "pkts" buffer is empty, read packet burst from ring */
if (p->n_pkts == 0) {
p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX,
+ NULL);
RTE_PORT_RING_READER_FRAG_STATS_PKTS_IN_ADD(p, p->n_pkts);
if (p->n_pkts == 0)
return n_pkts_out;
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index c5dbe07..85fad44 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -111,7 +111,8 @@ rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
@@ -124,7 +125,8 @@ rte_port_ring_multi_reader_rx(void *port, struct rte_mbuf **pkts,
struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
uint32_t nb_rx;

- nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);

return nb_rx;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 61a4dc8..b05fecb 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -488,7 +488,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -497,11 +498,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
@@ -516,15 +512,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = (prod_tail - cons_head);

/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(n == 0))
+ goto end;

cons_next = cons_head + n;
success = rte_atomic32_cmpset(&r->cons.head, cons_head,
@@ -543,7 +535,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_pause();

r->cons.tail = cons_next;
-
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -567,7 +561,8 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *available)
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
@@ -582,15 +577,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* and size(ring)-1. */
entries = prod_tail - cons_head;

- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- if (unlikely(entries == 0))
- return 0;
- n = entries;
- }
- }
+ if (n > entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+
+ if (unlikely(entries == 0))
+ goto end;

cons_next = cons_head + n;
r->cons.head = cons_next;
@@ -600,6 +591,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
+end:
+ if (available != NULL)
+ *available = entries - n;
return n;
}

@@ -746,9 +740,11 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -765,9 +761,11 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ available);
}

/**
@@ -787,12 +785,13 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects dequeued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
if (r->cons.single)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
}

/**
@@ -813,7 +812,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -831,7 +830,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -853,7 +852,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -1043,9 +1042,11 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1063,9 +1064,11 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sc_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, available);
}

/**
@@ -1085,12 +1088,13 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
if (r->cons.single)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
}

#ifdef __cplusplus
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index c06ff54..8970e1c 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -121,7 +121,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_rx[i],
(void **) worker_mbuf->array,
- app.burst_size_worker_read);
+ app.burst_size_worker_read,
+ NULL);

if (ret == 0)
continue;
@@ -151,7 +152,8 @@ app_main_loop_tx(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_tx[i],
(void **) &app.mbuf_tx[i].array[n_mbufs],
- app.burst_size_tx_read);
+ app.burst_size_tx_read,
+ NULL);

if (ret == 0)
continue;
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 8df28b4..15091b1 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -193,7 +193,8 @@ static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, };
static int
slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, size);
+ return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf,
+ size, NULL);
}

/*
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index 045a7f2..004882a 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -67,7 +67,7 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t eth_start = rte_rdtsc();
@@ -99,7 +99,7 @@ test_single_enqueue_dequeue(void)
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
rte_ring_enqueue_bulk(r, &burst, 1, NULL);
- rte_ring_dequeue_bulk(r, &burst, 1);
+ rte_ring_dequeue_bulk(r, &burst, 1, NULL);
}
const uint64_t sc_end = rte_rdtsc_precise();
rte_compiler_barrier();
@@ -133,7 +133,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, (void *)burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index b0ca88b..858ebc1 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -119,7 +119,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
__func__, i, rand);
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
NULL) != 0);
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand,
+ NULL) == rand);

/* fill the ring */
TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
@@ -129,7 +130,8 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz,
+ NULL) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -186,19 +188,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -232,19 +234,19 @@ test_ring_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1, NULL);
cur_dst += 1;
if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -265,7 +267,7 @@ test_ring_basic(void)
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if (ret == 0)
goto fail;
@@ -303,13 +305,13 @@ test_ring_basic(void)
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
cur_dst += num_elems;
if (ret == 0) {
printf("Cannot dequeue2\n");
@@ -390,19 +392,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -451,19 +453,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK entries */
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -505,19 +507,19 @@ test_ring_burst_basic(void)
goto fail;

printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -539,7 +541,7 @@ test_ring_burst_basic(void)
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -578,19 +580,19 @@ test_ring_burst_basic(void)

printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available objects - the exact MAX_BULK */
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -613,7 +615,7 @@ test_ring_burst_basic(void)
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_dequeue_burst(r, cur_dst, 2);
+ ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
if (ret != 2)
goto fail;
@@ -753,7 +755,7 @@ test_ring_basic_ex(void)
goto fail_test;
}

- ret = rte_ring_dequeue_burst(rp, obj, 2);
+ ret = rte_ring_dequeue_burst(rp, obj, 2, NULL);
if (ret != 2) {
printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index f95a8e9..ed89896 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -152,12 +152,12 @@ test_empty_dequeue(void)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t mc_end = rte_rdtsc();

printf("SC empty dequeue: %.2F\n",
@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();

@@ -325,7 +325,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -333,7 +334,8 @@ test_burst_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_burst(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

@@ -361,7 +363,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_sp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();

@@ -369,7 +372,8 @@ test_bulk_enqueue_dequeue(void)
for (i = 0; i < iterations; i++) {
rte_ring_mp_enqueue_bulk(r, burst,
bulk_sizes[sz], NULL);
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();

diff --git a/test/test/test_table_acl.c b/test/test/test_table_acl.c
index b3bfda4..4d43be7 100644
--- a/test/test/test_table_acl.c
+++ b/test/test/test_table_acl.c
@@ -713,7 +713,7 @@ test_pipeline_single_filter(int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0) {
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/test/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 36bfeda..b58aa5d 100644
--- a/test/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -494,7 +494,7 @@ test_pipeline_single_filter(int test_type, int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;

- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0)
printf("Got no objects from ring %d - error code %d\n",
i, ret);
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 395f4f3..39592ce 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -163,7 +163,7 @@ test_port_ring_writer(void)
rte_port_ring_writer_ops.f_flush(port);
expected_pkts = 1;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -7;
@@ -178,7 +178,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -193,7 +193,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -8;
@@ -208,7 +208,7 @@ test_port_ring_writer(void)

expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);

if (received_pkts < expected_pkts)
return -9;
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 39e070c..b209355 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -342,7 +342,7 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
dev_private = vrtl_eth_dev->data->dev_private;

rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increments ipackets count */
dev_private->eth_stats.ipackets += rx_count;
@@ -508,7 +508,7 @@ virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,

dev_private = vrtl_eth_dev->data->dev_private;
return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

static uint8_t
--
2.9.3
Bruce Richardson
2017-03-28 20:36:02 UTC
Permalink
Now that the enqueue function returns the amount of free space in the ring,
we can use that to replace the old watermark functionality. Update the
example app to do so, and re-enable it in the examples Makefile.
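
For illustration, the application-side pattern that replaces the built-in
watermark is roughly the sketch below. It condenses the main.c hunks further
down; RING_SIZE, high_watermark, ring_state, RING_OVERLOADED and
send_pause_frame() are the example app's own definitions, and the function
name is invented for the sketch.

static inline void
enqueue_with_watermark(struct rte_ring *ring, struct rte_mbuf **pkts,
                unsigned int nb_pkts, uint8_t port_id)
{
        unsigned int free_space, i;
        unsigned int nb_enq;

        /* bulk enqueue now reports the remaining free space */
        nb_enq = rte_ring_enqueue_bulk(ring, (void **)pkts, nb_pkts,
                        &free_space);

        /* ring usage = size minus free slots; compare against threshold */
        if (RING_SIZE - free_space > *high_watermark) {
                ring_state[port_id] = RING_OVERLOADED;
                send_pause_frame(port_id, 1337);
        }

        if (nb_enq == 0) {
                /* bulk enqueue is all-or-nothing: drop the whole burst */
                for (i = 0; i < nb_pkts; i++)
                        rte_pktmbuf_free(pkts[i]);
        }
}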

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
v4: updated rst doc to match the code changes
---
doc/guides/sample_app_ug/quota_watermark.rst | 148 ++++++++++++++++-----------
examples/Makefile | 2 +-
examples/quota_watermark/qw/init.c | 5 +-
examples/quota_watermark/qw/main.c | 16 +--
examples/quota_watermark/qw/main.h | 1 +
examples/quota_watermark/qwctl/commands.c | 4 +-
examples/quota_watermark/qwctl/qwctl.c | 2 +
examples/quota_watermark/qwctl/qwctl.h | 1 +
8 files changed, 106 insertions(+), 73 deletions(-)

diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index 9f86e10..09530f2 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.

Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,13 @@
Quota and Watermark Sample Application
======================================

-The Quota and Watermark sample application is a simple example of packet processing using Data Plane Development Kit (DPDK) that
-showcases the use of a quota as the maximum number of packets enqueue/dequeue at a time and low and high watermarks
-to signal low and high ring usage respectively.
+The Quota and Watermark sample application is a simple example of packet
+processing using Data Plane Development Kit (DPDK) that showcases the use
+of a quota as the maximum number of packets enqueue/dequeue at a time and
+low and high thresholds, or watermarks, to signal low and high ring usage
+respectively.

-Additionally, it shows how ring watermarks can be used to feedback congestion notifications to data producers by
+Additionally, it shows how the thresholds can be used to feedback congestion notifications to data producers by
temporarily stopping processing overloaded rings and sending Ethernet flow control frames.

This sample application is split in two parts:
@@ -64,7 +66,7 @@ each stage of which being connected by rings, as shown in :numref:`figure_pipeli


An adjustable quota value controls how many packets are being moved through the pipeline per enqueue and dequeue.
-Adjustable watermark values associated with the rings control a back-off mechanism that
+Adjustable threshold values associated with the rings control a back-off mechanism that
tries to prevent the pipeline from being overloaded by:

* Stopping enqueuing on rings for which the usage has crossed the high watermark threshold
@@ -216,25 +218,26 @@ in the *DPDK Getting Started Guide* and the *DPDK API Reference*.
Shared Variables Setup
^^^^^^^^^^^^^^^^^^^^^^

-The quota and low_watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
+The quota and high and low watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():

.. code-block:: c

void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
-
- qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME, 2 * sizeof(int), rte_socket_id(), RTE_MEMZONE_2MB);
+ const struct rte_memzone *qw_memzone;

- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
+ 3 * sizeof(int), rte_socket_id(), 0);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
- }
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
+ }

-These two variables are initialized to a default value in main() and
+These three variables are initialized to a default value in main() and
can be changed while qw is running using the qwctl control program.

Application Arguments
@@ -349,27 +352,37 @@ This is done using the following code:
/* Process each port round robin style */

for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
-
- ring = rings[lcore_id][port_id];
-
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(ring) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
-
- /* Enqueue received packets on the RX ring */
-
- nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, *quota);
-
- ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts);
- if (ret == -EDQUOT) {
- ring_state[port_id] = RING_OVERLOADED;
- send_pause_frame(port_id, 1337);
- }
+ if (!is_bit_set(port_id, portmask))
+ continue;
+
+ ring = rings[lcore_id][port_id];
+
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(ring) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
+
+ /* Enqueue received packets on the RX ring */
+ nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
+ (uint16_t) *quota);
+ ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
+ ring_state[port_id] = RING_OVERLOADED;
+ send_pause_frame(port_id, 1337);
+ }
+
+ if (ret == 0) {
+
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_rx_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
}

For each port in the port mask, the corresponding ring's pointer is fetched into ring and that ring's state is checked:
@@ -390,30 +403,40 @@ This thread is running on most of the logical cores to create and arbitrarily lo
previous_lcore_id = get_previous_lcore_id(lcore_id);

for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
-
- tx = rings[lcore_id][port_id];
- rx = rings[previous_lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(tx) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
-
- /* Dequeue up to quota mbuf from rx */
-
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
-
- if (unlikely(nb_dq_pkts < 0))
- continue;
-
- /* Enqueue them on tx */
-
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
- ring_state[port_id] = RING_OVERLOADED;
+ if (!is_bit_set(port_id, portmask))
+ continue;
+
+ tx = rings[lcore_id][port_id];
+ rx = rings[previous_lcore_id][port_id];
+
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(tx) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
+
+ /* Dequeue up to quota mbuf from rx */
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
+ if (unlikely(nb_dq_pkts < 0))
+ continue;
+
+ /* Enqueue them on tx */
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
+ ring_state[port_id] = RING_OVERLOADED;
+
+ if (ret == 0) {
+
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_dq_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
}

The thread's logic works mostly like receive_stage(),
@@ -482,5 +505,6 @@ low_watermark from the rte_memzone previously created by qw.

quota = qw_memzone->addr;

- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/examples/Makefile b/examples/Makefile
index 19cd5ad..da2bfdd 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-#DIRS-y += quota_watermark
+DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 95a9f94..6babfea 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -140,7 +140,7 @@ void init_ring(int lcore_id, uint8_t port_id)
if (ring == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

- rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);
+ *high_watermark = 80 * RING_SIZE / 100;

rings[lcore_id][port_id] = ring;
}
@@ -168,10 +168,11 @@ setup_shared_variables(void)
const struct rte_memzone *qw_memzone;

qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
- 2 * sizeof(int), rte_socket_id(), 0);
+ 3 * sizeof(int), rte_socket_id(), 0);
if (qw_memzone == NULL)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 2dcddea..bdb8a43 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -67,6 +67,7 @@ struct ether_fc_frame {

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;

uint8_t port_pairs[RTE_MAX_ETHPORTS];

@@ -158,6 +159,7 @@ receive_stage(__attribute__((unused)) void *args)
uint16_t nb_rx_pkts;

unsigned int lcore_id;
+ unsigned int free;

struct rte_mbuf *pkts[MAX_PKT_QUOTA];
struct rte_ring *ring;
@@ -189,13 +191,13 @@ receive_stage(__attribute__((unused)) void *args)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
(uint16_t) *quota);
ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
- nb_rx_pkts);
- if (ret == -EDQUOT) {
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
ring_state[port_id] = RING_OVERLOADED;
send_pause_frame(port_id, 1337);
}

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
@@ -217,6 +219,7 @@ pipeline_stage(__attribute__((unused)) void *args)
uint8_t port_id;

unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;

void *pkts[MAX_PKT_QUOTA];
struct rte_ring *rx, *tx;
@@ -253,11 +256,12 @@ pipeline_stage(__attribute__((unused)) void *args)
continue;

/* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
ring_state[port_id] = RING_OVERLOADED;

- else if (ret == -ENOBUFS) {
+ if (ret == 0) {

/*
* Return mbufs to the pool,
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 545ba42..8c8e311 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -43,6 +43,7 @@ enum ring_state {

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

extern uint8_t port_pairs[RTE_MAX_ETHPORTS];

diff --git a/examples/quota_watermark/qwctl/commands.c b/examples/quota_watermark/qwctl/commands.c
index 036bf80..5cac0e1 100644
--- a/examples/quota_watermark/qwctl/commands.c
+++ b/examples/quota_watermark/qwctl/commands.c
@@ -140,8 +140,8 @@ cmd_set_handler(__attribute__((unused)) void *parsed_result,
else
if (tokens->value >= *low_watermark * 100 / RING_SIZE
&& tokens->value <= 100)
- rte_ring_set_water_mark(ring,
- tokens->value * RING_SIZE / 100);
+ *high_watermark = tokens->value *
+ RING_SIZE / 100;
else
cmdline_printf(cl,
"ring high watermark must be between %u%% and 100%%\n",
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index 3a85cc3..7e7a396 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -55,6 +55,7 @@

int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;


static void
@@ -68,6 +69,7 @@ setup_shared_variables(void)

quota = qw_memzone->addr;
low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}

int main(int argc, char **argv)
diff --git a/examples/quota_watermark/qwctl/qwctl.h b/examples/quota_watermark/qwctl/qwctl.h
index 8d146e5..545914b 100644
--- a/examples/quota_watermark/qwctl/qwctl.h
+++ b/examples/quota_watermark/qwctl/qwctl.h
@@ -36,5 +36,6 @@

extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;

#endif /* _MAIN_H_ */
--
2.9.3
Bruce Richardson
2017-03-28 20:36:03 UTC
Permalink
The local variable i is only used for loop control, so define it in
the enqueue and dequeue blocks directly, rather than at the function
level.

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
lib/librte_ring/rte_ring.h | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index b05fecb..e801510 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -285,6 +285,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
+ unsigned int i; \
const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
@@ -311,6 +312,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
+ unsigned int i; \
uint32_t idx = cons_head & mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
@@ -361,7 +363,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned int max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move prod.head atomically */
@@ -431,7 +432,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned int i;
uint32_t mask = r->mask;

prod_head = r->prod.head;
@@ -495,7 +495,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned int i;
uint32_t mask = r->mask;

/* move cons.head atomically */
@@ -566,7 +565,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-03-28 20:36:04 UTC
Permalink
We can write a single common function for head manipulation for enqueue
and a common one for dequeue, allowing us to have a single worker function
for each of enqueue and dequeue, rather than two of each. Update all other
inline functions to use the new functions.
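
As a rough sketch of the shape of the change (the full helpers are in the
diff below), the common head update branches on is_sp so that the
single-producer case bypasses the CAS loop; the name and the simplified
variable-size handling below are illustrative only.

static inline unsigned int
move_prod_head_sketch(struct rte_ring *r, int is_sp, unsigned int n,
                uint32_t *old_head, uint32_t *new_head)
{
        const unsigned int max = n;
        int success;

        do {
                n = max;        /* restore the requested count on every retry */

                *old_head = r->prod.head;
                /* unsigned arithmetic keeps this correct across wrap-around */
                uint32_t free_entries = r->mask + r->cons.tail - *old_head;

                if (n > free_entries)
                        n = free_entries; /* FIXED behaviour would use 0 here */
                if (n == 0)
                        return 0;

                *new_head = *old_head + n;
                if (is_sp) {
                        r->prod.head = *new_head; /* no contention, no CAS */
                        success = 1;
                } else {
                        success = rte_atomic32_cmpset(&r->prod.head,
                                        *old_head, *new_head);
                }
        } while (unlikely(success == 0));

        return n;
}

The enqueue and dequeue workers then become thin wrappers: reserve a slice
with the head move, copy the objects, and publish the tail.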

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V3: renamed parameter "is_mp" in __rte_ring_do_dequeue to the correct "is_sc"
---
lib/librte_ring/rte_ring.c | 4 +-
lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
2 files changed, 149 insertions(+), 183 deletions(-)

diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 25f64f0..5f98c33 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,8 +138,8 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.single = !!(flags & RING_F_SP_ENQ);
- r->cons.single = !!(flags & RING_F_SC_DEQ);
+ r->prod.single = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
+ r->cons.single = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
r->size = count;
r->mask = count - 1;
r->prod.head = r->cons.head = 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index e801510..3d8f738 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -169,6 +169,12 @@ struct rte_ring {
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

+/* @internal defines for passing to the enqueue dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
+
/**
* Calculate the memory size needed for a ring
*
@@ -287,7 +293,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
#define ENQUEUE_PTRS() do { \
unsigned int i; \
const uint32_t size = r->size; \
- uint32_t idx = prod_head & mask; \
+ uint32_t idx = prod_head & r->mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
r->ring[idx] = obj_table[i]; \
@@ -313,7 +319,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
unsigned int i; \
- uint32_t idx = cons_head & mask; \
+ uint32_t idx = cons_head & r->mask; \
const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
@@ -336,83 +342,72 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} while (0)

/**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
* @param n
- * The number of objects to add in the ring from the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned int max = n;
+ const uint32_t mask = r->mask;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

- /* move prod.head atomically */
do {
/* Reset n to the initial burst count */
n = max;

- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
+ *old_head = r->prod.head;
+ const uint32_t cons_tail = r->cons.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
+ * *old_head > cons_tail). So 'free_entries' is always between 0
* and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
+ *free_entries = (mask + cons_tail - *old_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries))
+ if (unlikely(n > *free_entries))
n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : free_entries;
+ 0 : *free_entries;

if (n == 0)
- goto end;
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
} while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-end:
- if (free_space != NULL)
- *free_space = free_entries - n;
return n;
}

/**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
*
- * @param r
+ * @param r
* A pointer to the ring structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
@@ -420,44 +415,40 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
* @return
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- uint32_t mask = r->mask;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;

+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
if (n == 0)
goto end;

-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
ENQUEUE_PTRS();
rte_smp_wmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->prod.tail != prod_head))
+ rte_pause();
+
r->prod.tail = prod_next;
+
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -465,130 +456,112 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
}

/**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
+ unsigned int max = n;
int success;
- uint32_t mask = r->mask;

/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
n = max;

- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
+ *old_head = r->cons.head;
+ const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- entries = (prod_tail - cons_head);
+ *entries = (prod_tail - *old_head);

/* Set the actual entries for dequeue */
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

if (unlikely(n == 0))
- goto end;
-
- cons_next = cons_head + n;
- success = rte_atomic32_cmpset(&r->cons.head, cons_head,
- cons_next);
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
} while (unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
- rte_smp_rmb();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
-end:
- if (available != NULL)
- *available = entries - n;
return n;
}

/**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
*
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of void * pointers (objects).
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of objects to pull from the ring.
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
* @return
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *available)
+ int is_sc, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- uint32_t mask = r->mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
-
- if (unlikely(entries == 0))
- goto end;
+ uint32_t cons_head, cons_next;
+ uint32_t entries;

- cons_next = cons_head + n;
- r->cons.head = cons_next;
+ n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;

- /* copy in table */
DEQUEUE_PTRS();
rte_smp_rmb();

+ /*
+ * If there are other enqueues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (unlikely(r->cons.tail != cons_head))
+ rte_pause();
+
r->cons.tail = cons_next;
+
end:
if (available != NULL)
*available = entries - n;
@@ -614,8 +587,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MP, free_space);
}

/**
@@ -634,8 +607,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SP, free_space);
}

/**
@@ -658,10 +631,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->prod.single, free_space);
}

/**
@@ -741,8 +712,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MC, available);
}

/**
@@ -762,8 +733,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
- available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SC, available);
}

/**
@@ -786,10 +757,8 @@ static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->cons.single, available);
}

/**
@@ -972,8 +941,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}

/**
@@ -992,8 +961,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}

/**
@@ -1016,10 +985,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
- else
- return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+ r->prod.single, free_space);
}

/**
@@ -1043,8 +1010,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}

/**
@@ -1065,8 +1032,8 @@ static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n,
- RTE_RING_QUEUE_VARIABLE, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}

/**
@@ -1089,10 +1056,9 @@ static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue_burst(r, obj_table, n, available);
- else
- return rte_ring_mc_dequeue_burst(r, obj_table, n, available);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE,
+ r->cons.single, available);
}

#ifdef __cplusplus
--
2.9.3
Bruce Richardson
2017-03-28 20:36:05 UTC
Permalink
Both producer and consumer use the same logic for updating the tail
index, so merge it into a single function.
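
To spell out the reasoning behind the merged helper (and the V3 note below),
this is the function from the diff with explanatory comments added; producer
and consumer each pass their own rte_ring_headtail:

static inline __attribute__((always_inline)) void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
                uint32_t single)
{
        /*
         * MP/MC only: threads that reserved earlier slices of the ring must
         * publish their tails first, otherwise we would expose entries they
         * have not finished copying yet.
         */
        if (!single)
                while (unlikely(ht->tail != old_val))
                        rte_pause();

        /* publish our slice; SP/SC callers skip the wait above entirely */
        ht->tail = new_val;
}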

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V3: added check for "single" mode in tail update to bypass the unneeded check
---
lib/librte_ring/rte_ring.h | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 3d8f738..b352dad 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -341,6 +341,21 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)

+static inline __attribute__((always_inline)) void
+update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
+ uint32_t single)
+{
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ if (!single)
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ ht->tail = new_val;
+}
+
/**
* @internal This function updates the producer head for enqueue
*
@@ -440,15 +455,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-
+ update_tail(&r->prod, prod_head, prod_next, is_sp);
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -553,14 +560,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head))
- rte_pause();
-
- r->cons.tail = cons_next;
+ update_tail(&r->cons, cons_head, cons_next, is_sc);

end:
if (available != NULL)
--
2.9.3
Bruce Richardson
2017-03-28 20:36:06 UTC
Permalink
Modify the enqueue and dequeue macros to support copying any type of
object by passing in the exact object type. Rather than using the "ring"
structure member of rte_ring, which is of type "array of void *", instead
have the macros take the start of the ring as a pointer value, thereby
leaving the rte_ring structure as purely a header value. This allows it
to be reused by other future ring types, which can add extra fields if
they want, or even have the actual ring elements, of whatever type,
stored separately from the ring header.
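
As a hypothetical illustration of what this enables (not part of this
series), a future ring variant could store plain uint64_t values directly
after the header and reuse the internal helpers with its own element type;
my_event_ring and the enqueue function below are invented names used only
for the sketch.

struct my_event_ring {
        struct rte_ring hdr;    /* header only, no trailing void *[] */
        /* uint64_t slots are laid out immediately after the header */
};

static inline unsigned int
my_event_ring_sp_enqueue(struct my_event_ring *er, const uint64_t *ev_table,
                unsigned int n)
{
        uint32_t prod_head, prod_next, free_entries;

        n = __rte_ring_move_prod_head(&er->hdr, __IS_SP, n,
                        RTE_RING_QUEUE_VARIABLE, &prod_head, &prod_next,
                        &free_entries);
        if (n == 0)
                return 0;

        /* copy raw 64-bit values instead of void * pointers */
        ENQUEUE_PTRS(&er->hdr, &er[1], prod_head, ev_table, n, uint64_t);
        rte_smp_wmb();

        update_tail(&er->hdr.prod, prod_head, prod_next, __IS_SP);
        return n;
}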

Signed-off-by: Bruce Richardson <***@intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
lib/librte_ring/rte_ring.h | 68 ++++++++++++++++++++++++----------------------
1 file changed, 36 insertions(+), 32 deletions(-)

diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index b352dad..f0692d3 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -159,11 +159,7 @@ struct rte_ring {

/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
-
- void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
- * not volatile so need to be careful
- * about compiler re-ordering */
-};
+} __rte_cache_aligned;

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
@@ -290,54 +286,62 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
-#define ENQUEUE_PTRS() do { \
+#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
unsigned int i; \
- const uint32_t size = r->size; \
- uint32_t idx = prod_head & r->mask; \
+ const uint32_t size = (r)->size; \
+ uint32_t idx = prod_head & (r)->mask; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx+1] = obj_table[i+1]; \
- r->ring[idx+2] = obj_table[i+2]; \
- r->ring[idx+3] = obj_table[i+3]; \
+ ring[idx] = obj_table[i]; \
+ ring[idx+1] = obj_table[i+1]; \
+ ring[idx+2] = obj_table[i+2]; \
+ ring[idx+3] = obj_table[i+3]; \
} \
switch (n & 0x3) { \
- case 3: r->ring[idx++] = obj_table[i++]; \
- case 2: r->ring[idx++] = obj_table[i++]; \
- case 1: r->ring[idx++] = obj_table[i++]; \
+ case 3: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ ring[idx++] = obj_table[i++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
} \
-} while(0)
+} while (0)

/* the actual copy of pointers on the ring to obj_table.
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
-#define DEQUEUE_PTRS() do { \
+#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
unsigned int i; \
- uint32_t idx = cons_head & r->mask; \
- const uint32_t size = r->size; \
+ uint32_t idx = cons_head & (r)->mask; \
+ const uint32_t size = (r)->size; \
+ obj_type *ring = (void *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i+1] = r->ring[idx+1]; \
- obj_table[i+2] = r->ring[idx+2]; \
- obj_table[i+3] = r->ring[idx+3]; \
+ obj_table[i] = ring[idx]; \
+ obj_table[i+1] = ring[idx+1]; \
+ obj_table[i+2] = ring[idx+2]; \
+ obj_table[i+3] = ring[idx+3]; \
} \
switch (n & 0x3) { \
- case 3: obj_table[i++] = r->ring[idx++]; \
- case 2: obj_table[i++] = r->ring[idx++]; \
- case 1: obj_table[i++] = r->ring[idx++]; \
+ case 3: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = ring[idx++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
} \
} while (0)

@@ -452,7 +456,7 @@ __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
if (n == 0)
goto end;

- ENQUEUE_PTRS();
+ ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
rte_smp_wmb();

update_tail(&r->prod, prod_head, prod_next, is_sp);
@@ -557,7 +561,7 @@ __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
if (n == 0)
goto end;

- DEQUEUE_PTRS();
+ DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
rte_smp_rmb();

update_tail(&r->cons, cons_head, cons_next, is_sc);
--
2.9.3
Yuanhan Liu
2017-03-29 02:47:52 UTC
Post by Bruce Richardson
This patchset make a set of, sometimes non-backward compatible, cleanup
changes to the rte_ring code in order to improve it. The resulting code is
shorter, since the existing functions are restructured to reduce code
duplication, as well as being more consistent in behaviour. The specific
changes made are explained in each patch which makes that change.
...
Post by Bruce Richardson
52 files changed, 664 insertions(+), 1472 deletions(-)
^^^ ^^^^
Great rework!

Series Reviewed-by: Yuanhan Liu <***@linux.intel.com>

--yliu
Bruce Richardson
2017-03-29 13:09:32 UTC
There was a compile-time setting to make a ring yield the CPU when it
entered the loop in MP or MC rings waiting for the tail pointer update.
Build-time settings are not recommended for enabling/disabling features,
and since this was off by default, remove it completely. If needed, a
runtime-enabled equivalent can be used instead.
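
For illustration only, an application that still wants the old behaviour
could approximate it outside the library by retrying a burst enqueue and
yielding after a chosen number of pauses (a sketch, not the removed
feature itself; rte_pause() is assumed to be available via the ring
header's includes at this point in the tree):

#include <sched.h>
#include <rte_ring.h>

/* retry until at least one object is enqueued; the caller handles any
 * remainder reported through the return value */
static inline unsigned int
app_enqueue_with_yield(struct rte_ring *r, void *const *objs,
		       unsigned int n, unsigned int pause_rep)
{
	unsigned int sent, rep = 0;

	while ((sent = rte_ring_enqueue_burst(r, objs, n)) == 0) {
		rte_pause();
		if (pause_rep != 0 && ++rep == pause_rep) {
			rep = 0;
			sched_yield(); /* give a preempted peer a chance */
		}
	}
	return sent;
}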

Signed-off-by: Bruce Richardson <***@intel.com>
Reviewed-by: Yuanhan Liu <***@linux.intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
config/common_base | 1 -
doc/guides/prog_guide/env_abstraction_layer.rst | 5 ----
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.h | 35 +++++--------------------
4 files changed, 7 insertions(+), 35 deletions(-)

diff --git a/config/common_base b/config/common_base
index 69e91ae..2d54ddf 100644
--- a/config/common_base
+++ b/config/common_base
@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
# Compile librte_mempool
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 10a10a8..7c39cd2 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -352,11 +352,6 @@ Known Issues

3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.

- ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
- It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
- This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
+ rte_timer

Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 50123c2..25d8549 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -134,6 +134,7 @@ API Changes

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
+ * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 2777b41..f8ac7f5 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -114,11 +114,6 @@ enum rte_ring_queue_behavior {
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)

-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
-#endif
-
struct rte_memzone; /* forward declaration, so as not to require memzone.h */

#if RTE_CACHE_LINE_SIZE < 128
@@ -393,7 +388,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -447,18 +442,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head)) {
+ while (unlikely(r->prod.tail != prod_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->prod.tail = prod_next;
return ret;
}
@@ -491,7 +477,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;
int ret;

@@ -568,7 +554,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
+ unsigned int i;
uint32_t mask = r->mask;

/* Avoid the unnecessary cmpset operation below, which is also
@@ -613,18 +599,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head)) {
+ while (unlikely(r->cons.tail != cons_head))
rte_pause();

- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -659,7 +636,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned i;
+ unsigned int i;
uint32_t mask = r->mask;

cons_head = r->cons.head;
--
2.9.3
Bruce Richardson
2017-03-29 13:09:31 UTC
The debug option only provided statistics to the user, most of which can
be tracked by the application itself. Remove both the compile-time option
and the feature itself, simplifying the code.
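
As a sketch only, an application that wants the removed counters can keep
its own per-lcore statistics around the ring calls; all names below are
invented, and the bulk API at this point in the series still returns 0 on
success and -ENOBUFS on failure:

#include <errno.h>
#include <rte_lcore.h>
#include <rte_ring.h>

struct app_ring_stats {
	uint64_t enq_ok_bulk;
	uint64_t enq_ok_objs;
	uint64_t enq_fail_bulk;
	uint64_t enq_fail_objs;
} __rte_cache_aligned;

static struct app_ring_stats app_stats[RTE_MAX_LCORE];

static inline int
app_enqueue_counted(struct rte_ring *r, void *const *objs, unsigned int n)
{
	struct app_ring_stats *s = &app_stats[rte_lcore_id()];
	int ret = rte_ring_enqueue_bulk(r, objs, n);

	if (ret == -ENOBUFS) {
		s->enq_fail_bulk++;
		s->enq_fail_objs += n;
	} else {
		/* 0, or -EDQUOT while watermarks still exist */
		s->enq_ok_bulk++;
		s->enq_ok_objs += n;
	}
	return ret;
}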

Signed-off-by: Bruce Richardson <***@intel.com>
Reviewed-by: Yuanhan Liu <***@linux.intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
config/common_base | 1 -
doc/guides/prog_guide/ring_lib.rst | 7 -
doc/guides/rel_notes/release_17_05.rst | 1 +
lib/librte_ring/rte_ring.c | 41 ----
lib/librte_ring/rte_ring.h | 97 +-------
test/test/test_ring.c | 410 ---------------------------------
6 files changed, 13 insertions(+), 544 deletions(-)

diff --git a/config/common_base b/config/common_base
index c394651..69e91ae 100644
--- a/config/common_base
+++ b/config/common_base
@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0

#
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f69753..d4ab502 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,

This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.

-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 8b66ac3..50123c2 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -133,6 +133,7 @@ API Changes
have been made to it:

* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
+ * removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``

ABI Changes
-----------
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 93485d4..934ce87 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif

/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats sum;
- unsigned lcore_id;
-#endif
-
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " watermark=0\n");
else
fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
-
- /* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
- sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
- sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
- sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
- sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
- sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
- sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
- sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
- sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
- sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
- }
- fprintf(f, " size=%"PRIu32"\n", r->size);
- fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
- fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
- fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
- fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
- fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
- fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
- fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
- fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
- fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
- fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
- fprintf(f, " no statistics available\n");
-#endif
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index d650215..2777b41 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};

-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -184,10 +166,6 @@ struct rte_ring {
/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
* about compiler re-ordering */
@@ -199,27 +177,6 @@ struct rte_ring {
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
-/**
* Calculate the memory size needed for a ring
*
* This function returns the number of bytes needed for a ring, given
@@ -460,17 +417,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -485,15 +437,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

/*
* If there are other enqueues in progress that preceded us,
@@ -557,17 +505,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,

/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
@@ -580,15 +523,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }

r->prod.tail = prod_next;
return ret;
@@ -652,16 +591,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

/* Set the actual entries for dequeue */
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -691,7 +625,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
sched_yield();
}
}
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;

return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -738,16 +671,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
entries = prod_tail - cons_head;

if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
@@ -759,7 +687,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
DEQUEUE_PTRS();
rte_smp_rmb();

- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 5f09097..3891f5d 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
return -1;
}

-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
- printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
- return 0;
-#else
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
- unsigned num_items = 0;
- unsigned failed_enqueue_ops = 0;
- unsigned failed_enqueue_items = 0;
- unsigned failed_dequeue_ops = 0;
- unsigned failed_dequeue_items = 0;
- unsigned last_enqueue_ops = 0;
- unsigned last_enqueue_items = 0;
- unsigned last_quota_ops = 0;
- unsigned last_quota_items = 0;
- unsigned lcore_id = rte_lcore_id();
- struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
- printf("Test the ring stats.\n");
-
- /* Reset the watermark in case it was set in another test. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Allocate some dummy object pointers. */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
-
- /* Allocate some memory for copied objects. */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
- /* Set the head and tail pointers. */
- cur_src = src;
- cur_dst = dst;
-
- /* Do Enqueue tests. */
- printf("Test the dequeue stats.\n");
-
- /* Fill the ring up to RING_SIZE -1. */
- printf("Fill the ring.\n");
- for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
- rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- }
-
- /* Adjust for final enqueue = MAX_BULK -1. */
- cur_src--;
-
- printf("Verify that the ring is full.\n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
-
- printf("Verify the enqueue success stats.\n");
- /* Stats should match above enqueue operations to fill the ring. */
- if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Current max objects is RING_SIZE -1. */
- if (ring_stats->enq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any failures yet. */
- if (ring_stats->enq_fail_bulk != 0)
- goto fail;
- if (ring_stats->enq_fail_objs != 0)
- goto fail;
-
-
- printf("Test stats for SP burst enqueue to a full ring.\n");
- num_items = 2;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for SP bulk enqueue to a full ring.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP burst enqueue to a full ring.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP bulk enqueue to a full ring.\n");
- num_items = 16;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- /* Do Dequeue tests. */
- printf("Test the dequeue stats.\n");
-
- printf("Empty the ring.\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* There was only RING_SIZE -1 objects to dequeue. */
- cur_dst++;
-
- printf("Verify ring is empty.\n");
- if (1 != rte_ring_empty(r))
- goto fail;
-
- printf("Verify the dequeue success stats.\n");
- /* Stats should match above dequeue operations. */
- if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Objects dequeued is RING_SIZE -1. */
- if (ring_stats->deq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any dequeue failure stats yet. */
- if (ring_stats->deq_fail_bulk != 0)
- goto fail;
-
- printf("Test stats for SC burst dequeue with an empty ring.\n");
- num_items = 2;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for SC bulk dequeue with an empty ring.\n");
- num_items = 4;
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC burst dequeue with an empty ring.\n");
- num_items = 8;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC bulk dequeue with an empty ring.\n");
- num_items = 16;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test total enqueue/dequeue stats.\n");
- /* At this point the enqueue and dequeue stats should be the same. */
- if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
- goto fail;
- if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
- goto fail;
- if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
- goto fail;
- if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
- goto fail;
-
-
- /* Watermark Tests. */
- printf("Test the watermark/quota stats.\n");
-
- printf("Verify the initial watermark stats.\n");
- /* Watermark stats should be 0 since there is no watermark. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Set a watermark. */
- rte_ring_set_water_mark(r, 16);
-
- /* Reset pointers. */
- cur_src = src;
- cur_dst = dst;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue below watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should still be 0. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Success stats should have increased. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
- goto fail;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue at watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != 1)
- goto fail;
- if (ring_stats->enq_quota_objs != num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP burst enqueue above watermark.\n");
- num_items = 1;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP burst enqueue above watermark.\n");
- num_items = 2;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP bulk enqueue above watermark.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP bulk enqueue above watermark.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- printf("Test watermark success stats.\n");
- /* Success stats should be same as last non-watermarked enqueue. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items)
- goto fail;
-
-
- /* Cleanup. */
-
- /* Empty the ring. */
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* Reset the watermark. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
-fail:
- free(src);
- free(dst);
- return -1;
-#endif
-}
-
/*
* it will always fail to create ring with a wrong ring size number in this function
*/
@@ -1335,10 +929,6 @@ test_ring(void)
if (test_ring_basic() < 0)
return -1;

- /* ring stats */
- if (test_ring_stats() < 0)
- return -1;
-
/* basic operations */
if (test_live_watermark_change() < 0)
return -1;
--
2.9.3
Bruce Richardson
2017-03-29 13:09:33 UTC
Remove the watermark support. A future commit will add support for having
the enqueue functions return the amount of free space in the ring, which
will allow applications to implement their own watermark checks while also
being more generally useful to the application.
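
As a sketch of what such an application-side check could look like (the
threshold and helper name are invented; it uses the existing
rte_ring_count() rather than the free-space return that a later patch
adds):

#include <rte_ring.h>

#define APP_RING_HIGH_WM 96 /* application-chosen threshold */

static inline int
app_enqueue_check_wm(struct rte_ring *r, void *const *objs, unsigned int n,
		     int *above_wm)
{
	int ret = rte_ring_enqueue_bulk(r, objs, n);

	/* let the caller react, e.g. request Ethernet PAUSE, once the
	 * ring fills past the application's high-water mark */
	*above_wm = rte_ring_count(r) > APP_RING_HIGH_WM;
	return ret;
}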

Signed-off-by: Bruce Richardson <***@intel.com>
Reviewed-by: Yuanhan Liu <***@linux.intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V2: fix missed references to watermarks in v1
---
doc/guides/prog_guide/ring_lib.rst | 8 --
doc/guides/rel_notes/release_17_05.rst | 2 +
examples/Makefile | 2 +-
lib/librte_ring/rte_ring.c | 23 -----
lib/librte_ring/rte_ring.h | 58 +------------
test/test/autotest_test_funcs.py | 7 --
test/test/commands.c | 52 ------------
test/test/test_ring.c | 149 +--------------------------------
8 files changed, 8 insertions(+), 293 deletions(-)

diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index d4ab502..b31ab7a 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -102,14 +102,6 @@ Name
A ring is identified by a unique name.
It is not possible to create two rings with the same name (rte_ring_create() returns NULL if this is attempted).

-Water Marking
-~~~~~~~~~~~~~
-
-The ring can have a high water mark (threshold).
-Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
-
-This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
-
Use Cases
---------

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 25d8549..084b359 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -135,6 +135,8 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
+ * removed the function ``rte_ring_set_water_mark`` as part of a general
+ removal of watermarks support in the library.

ABI Changes
-----------
diff --git a/examples/Makefile b/examples/Makefile
index da2bfdd..19cd5ad 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -81,7 +81,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
-DIRS-y += quota_watermark
+#DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index 934ce87..25f64f0 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -138,7 +138,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->watermark = count;
r->prod.single = !!(flags & RING_F_SP_ENQ);
r->cons.single = !!(flags & RING_F_SC_DEQ);
r->size = count;
@@ -256,24 +255,6 @@ rte_ring_free(struct rte_ring *r)
rte_free(te);
}

-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
-{
- if (count >= r->size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->size;
-
- r->watermark = count;
- return 0;
-}
-
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
@@ -287,10 +268,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->watermark == r->size)
- fprintf(f, " watermark=0\n");
- else
- fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
}

/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index f8ac7f5..906e8ae 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -153,7 +153,6 @@ struct rte_ring {
/**< Memzone, if any, containing the rte_ring */
uint32_t size; /**< Size of ring. */
uint32_t mask; /**< Mask (size-1) of ring. */
- uint32_t watermark; /**< Max items before EDQUOT in producer. */

/** Ring producer status. */
struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
@@ -168,7 +167,6 @@ struct rte_ring {

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
@@ -274,26 +272,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);

/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -374,8 +352,6 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -390,7 +366,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
int success;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
@@ -431,13 +406,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
/*
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
@@ -446,7 +414,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -465,8 +433,6 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* Depend on the behavior value
* if behavior = RTE_RING_QUEUE_FIXED
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
* if behavior = RTE_RING_QUEUE_VARIABLE
* - n: Actual number of objects enqueued.
@@ -479,7 +445,6 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
uint32_t prod_next, free_entries;
unsigned int i;
uint32_t mask = r->mask;
- int ret;

prod_head = r->prod.head;
cons_tail = r->cons.tail;
@@ -508,15 +473,8 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
ENQUEUE_PTRS();
rte_smp_wmb();

- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- else
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-
r->prod.tail = prod_next;
- return ret;
+ return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
}

/**
@@ -682,8 +640,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -704,8 +660,6 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -730,8 +684,6 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -756,8 +708,6 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -775,8 +725,6 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
@@ -798,8 +746,6 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
diff --git a/test/test/autotest_test_funcs.py b/test/test/autotest_test_funcs.py
index 1c5f390..8da8fcd 100644
--- a/test/test/autotest_test_funcs.py
+++ b/test/test/autotest_test_funcs.py
@@ -292,11 +292,4 @@ def ring_autotest(child, test_name):
elif index == 2:
return -1, "Fail [Timeout]"

- child.sendline("set_watermark test 100")
- child.sendline("dump_ring test")
- index = child.expect([" watermark=100",
- pexpect.TIMEOUT], timeout=1)
- if index != 0:
- return -1, "Fail [Bad watermark]"
-
return 0, "Success"
diff --git a/test/test/commands.c b/test/test/commands.c
index 2df46b0..551c81d 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -228,57 +228,6 @@ cmdline_parse_inst_t cmd_dump_one = {

/****************/

-struct cmd_set_ring_result {
- cmdline_fixed_string_t set;
- cmdline_fixed_string_t name;
- uint32_t value;
-};
-
-static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_ring_result *res = parsed_result;
- struct rte_ring *r;
- int ret;
-
- r = rte_ring_lookup(res->name);
- if (r == NULL) {
- cmdline_printf(cl, "Cannot find ring\n");
- return;
- }
-
- if (!strcmp(res->set, "set_watermark")) {
- ret = rte_ring_set_water_mark(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set water mark\n");
- }
-}
-
-cmdline_parse_token_string_t cmd_set_ring_set =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_watermark");
-
-cmdline_parse_token_string_t cmd_set_ring_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
-
-cmdline_parse_token_num_t cmd_set_ring_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);
-
-cmdline_parse_inst_t cmd_set_ring = {
- .f = cmd_set_ring_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "set watermark: "
- "set_watermark <ring_name> <value>",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_set_ring_set,
- (void *)&cmd_set_ring_name,
- (void *)&cmd_set_ring_value,
- NULL,
- },
-};
-
-/****************/
-
struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
@@ -419,7 +368,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_autotest,
(cmdline_parse_inst_t *)&cmd_dump,
(cmdline_parse_inst_t *)&cmd_dump_one,
- (cmdline_parse_inst_t *)&cmd_set_ring,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_set_rxtx,
(cmdline_parse_inst_t *)&cmd_set_rxtx_anchor,
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 3891f5d..666a451 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -78,21 +78,6 @@
* - Dequeue one object, two objects, MAX_BULK objects
* - Check that dequeued pointers are correct
*
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check live watermark change
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of watermark.
- * - At the same time, change the watermark on the master lcore.
- * - The slave lcore will check that watermark changes from 16 to 32.
- *
* #. Performance tests.
*
* Tests done in test_ring_perf.c
@@ -115,123 +100,6 @@ static struct rte_ring *r;

#define TEST_RING_FULL_EMTPY_ITER 8

-static int
-check_live_watermark_change(__attribute__((unused)) void *dummy)
-{
- uint64_t hz = rte_get_timer_hz();
- void *obj_table[MAX_BULK];
- unsigned watermark, watermark_old = 16;
- uint64_t cur_time, end_time;
- int64_t diff = 0;
- int i, ret;
- unsigned count = 4;
-
- /* init the object table */
- memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz / 4);
-
- /* check that bulk and watermark are 4 and 32 (respectively) */
- while (diff >= 0) {
-
- /* add in ring until we reach watermark */
- ret = 0;
- for (i = 0; i < 16; i ++) {
- if (ret != 0)
- break;
- ret = rte_ring_enqueue_bulk(r, obj_table, count);
- }
-
- if (ret != -EDQUOT) {
- printf("Cannot enqueue objects, or watermark not "
- "reached (ret=%d)\n", ret);
- return -1;
- }
-
- /* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->watermark;
- if (watermark != watermark_old &&
- (watermark_old != 16 || watermark != 32)) {
- printf("Bad watermark change %u -> %u\n", watermark_old,
- watermark);
- return -1;
- }
- watermark_old = watermark;
-
- /* dequeue objects from ring */
- while (i--) {
- ret = rte_ring_dequeue_bulk(r, obj_table, count);
- if (ret != 0) {
- printf("Cannot dequeue (ret=%d)\n", ret);
- return -1;
- }
- }
-
- cur_time = rte_get_timer_cycles();
- diff = end_time - cur_time;
- }
-
- if (watermark_old != 32 ) {
- printf(" watermark was not updated (wm=%u)\n",
- watermark_old);
- return -1;
- }
-
- return 0;
-}
-
-static int
-test_live_watermark_change(void)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
-
- printf("Test watermark live modification\n");
- rte_ring_set_water_mark(r, 16);
-
- /* launch a thread that will enqueue and dequeue, checking
- * watermark and quota */
- rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
-
- rte_delay_ms(100);
- rte_ring_set_water_mark(r, 32);
- rte_delay_ms(100);
-
- if (rte_eal_wait_lcore(lcore_id2) < 0)
- return -1;
-
- return 0;
-}
-
-/* Test for catch on invalid watermark values */
-static int
-test_set_watermark( void ){
- unsigned count;
- int setwm;
-
- struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
- if(r == NULL){
- printf( " ring lookup failed\n" );
- goto error;
- }
- count = r->size * 2;
- setwm = rte_ring_set_water_mark(r, count);
- if (setwm != -EINVAL){
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
-
- count = 0;
- rte_ring_set_water_mark(r, count);
- if (r->watermark != r->size) {
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
- return 0;
-
-error:
- return -1;
-}
-
/*
* helper routine for test_ring_basic
*/
@@ -418,8 +286,7 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- printf("test watermark and default bulk enqueue / dequeue\n");
- rte_ring_set_water_mark(r, 20);
+ printf("test default bulk enqueue / dequeue\n");
num_elems = 16;

cur_src = src;
@@ -433,8 +300,8 @@ test_ring_basic(void)
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != -EDQUOT) {
- printf("Watermark not exceeded\n");
+ if (ret != 0) {
+ printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
@@ -930,16 +797,6 @@ test_ring(void)
return -1;

/* basic operations */
- if (test_live_watermark_change() < 0)
- return -1;
-
- if ( test_set_watermark() < 0){
- printf ("Test failed to detect invalid parameter\n");
- return -1;
- }
- else
- printf ( "Test detected forced bad watermark values\n");
-
if ( test_create_count_odd() < 0){
printf ("Test failed to detect odd count\n");
return -1;
--
2.9.3
Bruce Richardson
2017-03-29 13:09:34 UTC
The bulk functions for rings return 0 when all elements are enqueued and a
negative value when there is no space. Change that to make them consistent
with the burst functions by returning the number of elements
enqueued/dequeued, i.e. 0 or N.
This change also allows the return value from enqueue/dequeue to be used
directly without a branch for error checking.
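
A short sketch of how the new convention is consumed (helper names are
invented): the bulk call now returns the number of objects moved, n or 0,
so the result can feed accounting directly instead of branching on a
negative errno value.

#include <rte_ring.h>

extern void drop_pkts(void *const *objs, unsigned int n); /* hypothetical */
extern uint64_t tx_count;                                  /* hypothetical */

static void
send_bulk_new_api(struct rte_ring *r, void *const *objs, unsigned int n)
{
	/* previously 0 meant success and -ENOBUFS meant failure;
	 * now the return value is the count of objects enqueued */
	unsigned int sent = rte_ring_enqueue_bulk(r, objs, n);

	tx_count += sent;
	if (sent == 0)
		drop_pkts(objs, n);
}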

Signed-off-by: Bruce Richardson <***@intel.com>
Reviewed-by: Yuanhan Liu <***@linux.intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
doc/guides/rel_notes/release_17_05.rst | 11 +++
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
examples/load_balancer/runtime.c | 16 ++-
.../client_server_mp/mp_client/client.c | 8 +-
.../client_server_mp/mp_server/main.c | 2 +-
examples/qos_sched/app_thread.c | 8 +-
examples/server_node_efd/node/node.c | 2 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 12 ++-
lib/librte_ring/rte_ring.h | 109 +++++++--------------
test/test-pipeline/pipeline_hash.c | 2 +-
test/test-pipeline/runtime.c | 8 +-
test/test/test_ring.c | 46 +++++----
test/test/test_ring_perf.c | 8 +-
14 files changed, 106 insertions(+), 130 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 084b359..6da2612 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -137,6 +137,17 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * changed the return value of the enqueue and dequeue bulk functions to
+ match that of the burst equivalents. In all cases, ring functions which
+ operate on multiple packets now return the number of elements enqueued
+ or dequeued, as appropriate. The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``

ABI Changes
-----------
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index 9b69cfe..e3a63c8 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325..82b10bc 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (
(void **) lp->rx.mbuf_out[worker].array,
bsz);

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -349,9 +349,8 @@ app_lcore_io_tx(
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

n_mbufs += bsz_rd;

@@ -505,9 +504,8 @@ app_lcore_worker(
(void **) lp->mbuf_in.array,
bsz_rd);

- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }

#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -559,7 +557,7 @@ app_lcore_worker(

#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +570,7 @@ app_lcore_worker(
}
#endif

- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);

- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca3..dca9eb9 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,10 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");

for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;

- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts, PKT_READ_SIZE);

if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d..19c95b2 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb..dab4594 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -180,7 +180,7 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

conf->counter = 0; /* reset empty read loop counter */
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt) == 0)
+ ; /* empty body */

conf_idx++;
if (confs[conf_idx] == NULL)
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index a6c0c70..9ec6a05 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -392,7 +392,7 @@ main(int argc, char *argv[])
*/
while (rx_pkts > 0 &&
unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
- rx_pkts) != 0))
+ rx_pkts) == 0))
rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
PKT_READ_SIZE);

diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 1a54d1b..3eb7fac 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != 0){
+ cl_rx_buf[node].count) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..409b860 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -42,26 +42,30 @@ static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n) == 0 ? -ENOBUFS : 0;
}

static unsigned
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 906e8ae..34b438c 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -349,14 +349,10 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -388,7 +384,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -414,7 +410,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -430,14 +426,10 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -457,7 +449,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOBUFS;
+ return 0;
else {
/* No free entry available */
if (unlikely(free_entries == 0))
@@ -474,7 +466,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
- return (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
+ return n;
}

/**
@@ -495,16 +487,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/

-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -536,7 +523,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
/* Set the actual entries for dequeue */
if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -562,7 +549,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,

r->cons.tail = cons_next;

- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -580,15 +567,10 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
* RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned n, enum rte_ring_queue_behavior behavior)
{
@@ -607,7 +589,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,

if (n > entries) {
if (behavior == RTE_RING_QUEUE_FIXED)
- return -ENOENT;
+ return 0;
else {
if (unlikely(entries == 0))
return 0;
@@ -623,7 +605,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
rte_smp_rmb();

r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}

/**
@@ -639,10 +621,9 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueue.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -659,10 +640,9 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -683,10 +663,9 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
- * - 0: Success; objects enqueued.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
@@ -713,7 +692,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -730,7 +709,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -751,10 +730,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.single)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
}

/**
@@ -770,11 +746,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -791,11 +765,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
@@ -815,11 +787,9 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
if (r->cons.single)
@@ -846,7 +816,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -864,7 +834,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
@@ -886,10 +856,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.single)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1) ? 0 : -ENOBUFS;
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 10d2869..1ac0aa8 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -547,6 +547,6 @@ app_main_loop_rx_metadata(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 42a6142..4e20669 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -98,7 +98,7 @@ app_main_loop_rx(void) {
app.rings_rx[i],
(void **) app.mbuf_rx.array,
n_mbufs);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -123,7 +123,7 @@ app_main_loop_worker(void) {
(void **) worker_mbuf->array,
app.burst_size_worker_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

do {
@@ -131,7 +131,7 @@ app_main_loop_worker(void) {
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
app.burst_size_worker_write);
- } while (ret < 0);
+ } while (ret == 0);
}
}

@@ -152,7 +152,7 @@ app_main_loop_tx(void) {
(void **) &app.mbuf_tx[i].array[n_mbufs],
app.burst_size_tx_read);

- if (ret == -ENOENT)
+ if (ret == 0)
continue;

n_mbufs += app.burst_size_tx_read;
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 666a451..112433b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,20 +117,18 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
TEST_RING_VERIFY(0 == rte_ring_empty(r));

/* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz) == rsz);
TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
TEST_RING_VERIFY(0 == rte_ring_count(r));
TEST_RING_VERIFY(0 == rte_ring_full(r));
@@ -171,37 +169,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -217,37 +215,37 @@ test_ring_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
cur_src += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
cur_src += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
cur_dst += 1;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
cur_dst += 2;
- if (ret != 0)
+ if (ret == 0)
goto fail;

printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;

/* check data */
@@ -264,11 +262,11 @@ test_ring_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
- if (ret != 0)
+ if (ret == 0)
goto fail;
}

@@ -294,25 +292,25 @@ test_ring_basic(void)

ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
cur_src += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue\n");
goto fail;
}
ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
cur_dst += num_elems;
- if (ret != 0) {
+ if (ret == 0) {
printf("Cannot dequeue2\n");
goto fail;
}
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 320c20c..8ccbdef 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -230,13 +230,13 @@ dequeue_bulk(void *p)

const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();
--
2.9.3
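
As a minimal sketch of the calling convention after this change (not part
of the patch; the ring pointer, object array and BURST size are assumed
purely for illustration), bulk callers now test the returned count rather
than a 0 / -ENOENT error code:

#include <rte_ring.h>

#define BURST 32    /* illustrative burst size */

/* With this change the bulk dequeue returns the number of objects
 * actually dequeued (0 or BURST here) instead of 0 / -ENOENT, so the
 * loop condition tests the count rather than an error code. */
static void
drain_ring(struct rte_ring *r, void **objs)
{
	while (rte_ring_dequeue_bulk(r, objs, BURST) != 0) {
		/* process the BURST objects in objs[] here */
	}
}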
Bruce Richardson
2017-03-29 13:09:35 UTC
Permalink
Add an extra parameter to the ring enqueue burst/bulk functions so that
those functions can optionally return the amount of free space left in
the ring. Applications can use this information in a number of ways;
for instance, with single-producer queues it provides a maximum
enqueue size that is guaranteed to succeed. It can also be used to
implement watermark functionality in applications, replacing the older
fixed watermark support with a more flexible scheme that lets an
application implement multiple watermark thresholds rather than just one.
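
A minimal sketch of such a use (not part of this patch; the ring, object
array and APP_WATERMARK threshold are assumptions for illustration only):

#include <rte_ring.h>

#define APP_WATERMARK 64    /* illustrative application-chosen threshold */

/* Enqueue a burst and react if the ring has filled past the watermark.
 * The reaction itself is application-specific and left as a stub. */
static unsigned int
enqueue_with_watermark(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int free_space;
	unsigned int sent;

	sent = rte_ring_enqueue_burst(r, objs, n, &free_space);
	if (free_space < APP_WATERMARK) {
		/* e.g. signal back-pressure to the producing stage */
	}
	return sent;    /* caller handles the n - sent objects left over */
}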

Signed-off-by: Bruce Richardson <***@intel.com>
Reviewed-by: Yuanhan Liu <***@linux.intel.com>
Acked-by: Olivier Matz <***@6wind.com>
---
V5: Added missing updates to perf thread example.
Added missing doxygen comments to new functions.
V4: Added missing updates to crypto PMDs.
---
doc/guides/rel_notes/release_17_05.rst | 3 +
doc/guides/sample_app_ug/server_node_efd.rst | 2 +-
drivers/crypto/armv8/rte_armv8_pmd.c | 6 +-
drivers/crypto/kasumi/rte_kasumi_pmd.c | 4 +-
drivers/crypto/snow3g/rte_snow3g_pmd.c | 4 +-
drivers/crypto/zuc/rte_zuc_pmd.c | 2 +-
drivers/net/ring/rte_eth_ring.c | 2 +-
examples/distributor/main.c | 3 +-
examples/load_balancer/runtime.c | 12 ++-
.../client_server_mp/mp_server/main.c | 2 +-
examples/packet_ordering/main.c | 7 +-
examples/performance-thread/l3fwd-thread/main.c | 4 +-
examples/qos_sched/app_thread.c | 4 +-
examples/server_node_efd/server/main.c | 2 +-
lib/librte_hash/rte_cuckoo_hash.c | 2 +-
lib/librte_mempool/rte_mempool_ring.c | 4 +-
lib/librte_pdump/rte_pdump.c | 2 +-
lib/librte_port/rte_port_ras.c | 2 +-
lib/librte_port/rte_port_ring.c | 28 +++---
lib/librte_ring/rte_ring.h | 107 ++++++++++++---------
test/test-pipeline/pipeline_hash.c | 3 +-
test/test-pipeline/runtime.c | 5 +-
test/test/test_link_bonding_mode4.c | 3 +-
test/test/test_pmd_ring_perf.c | 5 +-
test/test/test_ring.c | 55 +++++------
test/test/test_ring_perf.c | 16 +--
test/test/test_table_ports.c | 4 +-
test/test/virtual_pmd.c | 4 +-
28 files changed, 169 insertions(+), 128 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 6da2612..b361a98 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -137,6 +137,9 @@ API Changes
* removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
* removed the function ``rte_ring_set_water_mark`` as part of a general
removal of watermarks support in the library.
+ * added an extra parameter to the burst/bulk enqueue functions to
+ return the number of free spaces in the ring after enqueue. This can
+ be used by an application to implement its own watermark functionality.
* changed the return value of the enqueue and dequeue bulk functions to
match that of the burst equivalents. In all cases, ring functions which
operate on multiple packets now return the number of elements enqueued
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index e3a63c8..c2a5f20 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -286,7 +286,7 @@ repeated infinitely.

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index d2b88a3..37ecd7b 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -739,13 +739,15 @@ armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
goto enqueue_err;
}

- retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i);
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
qp->stats.enqueued_count += retval;

return retval;

enqueue_err:
- retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i);
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
if (ops[i] != NULL)
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;

diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 234921e..1dd05cb 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -359,7 +359,7 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
}

enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;

@@ -410,7 +410,7 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
}

enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
- processed_op);
+ processed_op, NULL);
qp->qp_stats.enqueued_count += enqueued_op;
*accumulated_enqueued_ops += enqueued_op;

diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index ca97271..01c4e1c 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -363,7 +363,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
}

enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;

@@ -414,7 +414,7 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
}

enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)&op, processed_op);
+ (void **)&op, processed_op, NULL);
qp->qp_stats.enqueued_count += enqueued_op;
*accumulated_enqueued_ops += enqueued_op;

diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 6f9c06a..5e2dbf5 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
}

enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 6f9cc1a..adbf478 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -102,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 7b8a759..bb84f13 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -238,7 +238,8 @@ lcore_rx(struct lcore_params *p)
continue;
}

- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs,
+ nb_ret, NULL);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
RTE_LOG_DP(DEBUG, DISTRAPP,
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 82b10bc..1645994 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,7 +144,8 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -310,7 +311,8 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
@@ -553,7 +555,8 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);

#if APP_STATS
lp->rings_out_iters[port] ++;
@@ -605,7 +608,8 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);

if (unlikely(ret == 0)) {
uint32_t k;
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 19c95b2..c2b0261 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)

cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) == 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index a448039..569b6da 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;

/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;

/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index bf92582..b4c0df1 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -2213,7 +2213,7 @@ lthread_rx(void *dummy)
ret = rte_ring_sp_enqueue_burst(
rx_conf->ring[worker_id],
(void **) pkts_burst,
- nb_rx);
+ nb_rx, NULL);

new_len = old_len + ret;

@@ -2453,7 +2453,7 @@ pthread_rx(void *dummy)
SET_CPU_BUSY(rx_conf, CPU_PROCESS);
worker_id = (worker_id + 1) % rx_conf->n_ring;
n = rte_ring_sp_enqueue_burst(rx_conf->ring[worker_id],
- (void **)pkts_burst, nb_rx);
+ (void **)pkts_burst, nb_rx, NULL);

if (unlikely(n != nb_rx)) {
uint32_t k;
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index dab4594..0c81a15 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}

if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) == 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);

@@ -231,7 +231,7 @@ app_worker_thread(struct thread_conf **confs)
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
- (void **)mbufs, nb_pkt) == 0)
+ (void **)mbufs, nb_pkt, NULL) == 0)
; /* empty body */

conf_idx++;
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index 3eb7fac..597b4c2 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -247,7 +247,7 @@ flush_rx_queue(uint16_t node)

cl = &nodes[node];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
- cl_rx_buf[node].count) != cl_rx_buf[node].count){
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
for (j = 0; j < cl_rx_buf[node].count; j++)
rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[node].count;
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 51db006..6552199 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -808,7 +808,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
/* Need to enqueue the free slots in global ring. */
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
- LCORE_CACHE_SIZE);
+ LCORE_CACHE_SIZE, NULL);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index 409b860..9b8fd2b 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -43,7 +43,7 @@ common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_mp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
@@ -51,7 +51,7 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
return rte_ring_sp_enqueue_bulk(mp->pool_data,
- obj_table, n) == 0 ? -ENOBUFS : 0;
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index cc0b5b1..b599d65 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -197,7 +197,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
dup_bufs[d_pkts++] = p;
}

- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
if (unlikely(ring_enq < d_pkts)) {
RTE_LOG(DEBUG, PDUMP,
"only %d of packets enqueued to ring\n", ring_enq);
diff --git a/lib/librte_port/rte_port_ras.c b/lib/librte_port/rte_port_ras.c
index c4bb508..4de0945 100644
--- a/lib/librte_port/rte_port_ras.c
+++ b/lib/librte_port/rte_port_ras.c
@@ -167,7 +167,7 @@ send_burst(struct rte_port_ring_writer_ras *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 0df1bcf..c5dbe07 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -241,7 +241,7 @@ send_burst(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -256,7 +256,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
uint32_t nb_tx;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -318,11 +318,11 @@ rte_port_ring_writer_tx_bulk_internal(void *port,

RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
- n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
@@ -517,7 +517,7 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -527,7 +527,8 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_sp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -550,7 +551,7 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;

nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);

/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -560,7 +561,8 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)

for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_mp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);

/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -633,10 +635,12 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
n_pkts_ok =
- rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
n_pkts_ok =
- rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);

if (n_pkts_ok >= n_pkts)
return 0;
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 34b438c..439698b 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -354,20 +354,16 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const unsigned int max = n;
int success;
unsigned int i;
uint32_t mask = r->mask;

- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
-
/* move prod.head atomically */
do {
/* Reset n to the initial burst count */
@@ -382,16 +378,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = (mask + cons_tail - prod_head);

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : free_entries;
+
+ if (n == 0)
+ goto end;

prod_next = prod_head + n;
success = rte_atomic32_cmpset(&r->prod.head, prod_head,
@@ -410,6 +402,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_pause();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -431,7 +426,8 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
*/
static inline unsigned int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ unsigned int *free_space)
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
@@ -447,16 +443,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
free_entries = mask + cons_tail - prod_head;

/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED)
- return 0;
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0))
- return 0;
- n = free_entries;
- }
- }
+ if (unlikely(n > free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
+
+ if (n == 0)
+ goto end;
+

prod_next = prod_head + n;
r->prod.head = prod_next;
@@ -466,6 +458,9 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
rte_smp_wmb();

r->prod.tail = prod_next;
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
return n;
}

@@ -620,14 +615,18 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* The number of objects enqueued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -639,14 +638,18 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* The number of objects enqueued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ free_space);
}

/**
@@ -662,17 +665,20 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* The number of objects enqueued, either 0 or n
*/
static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.single)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space);
}

/**
@@ -692,7 +698,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -709,7 +715,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -730,7 +736,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_enqueue_bulk(r, &obj, 1) ? 0 : -ENOBUFS;
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/**
@@ -971,14 +977,18 @@ struct rte_ring *rte_ring_lookup(const char *name);
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_mp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -990,14 +1000,18 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_sp_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, free_space);
}

/**
@@ -1013,17 +1027,20 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
if (r->prod.single)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space);
else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space);
}

/**
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 1ac0aa8..0c6e04f 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -546,7 +546,8 @@ app_main_loop_rx_metadata(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 4e20669..c06ff54 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -97,7 +97,7 @@ app_main_loop_rx(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
+ n_mbufs, NULL);
} while (ret == 0);
}
}
@@ -130,7 +130,8 @@ app_main_loop_worker(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
- app.burst_size_worker_write);
+ app.burst_size_worker_write,
+ NULL);
} while (ret == 0);
}
}
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 53caa3e..8df28b4 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -206,7 +206,8 @@ slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
static int
slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, size);
+ return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf,
+ size, NULL);
}

static uint16_t
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index af011f7..045a7f2 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -98,7 +98,7 @@ test_single_enqueue_dequeue(void)
const uint64_t sc_start = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
- rte_ring_enqueue_bulk(r, &burst, 1);
+ rte_ring_enqueue_bulk(r, &burst, 1, NULL);
rte_ring_dequeue_bulk(r, &burst, 1);
}
const uint64_t sc_end = rte_rdtsc_precise();
@@ -131,7 +131,8 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index 112433b..b0ca88b 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -117,11 +117,12 @@ test_ring_basic_full_empty(void * const src[], void *dst[])
rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
printf("%s: iteration %u, random shift: %u;\n",
__func__, i, rand);
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
+ NULL) != 0);
TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand) == rand);

/* fill the ring */
- TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz) != 0);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
TEST_RING_VERIFY(0 == rte_ring_free_count(r));
TEST_RING_VERIFY(rsz == rte_ring_count(r));
TEST_RING_VERIFY(rte_ring_full(r));
@@ -167,19 +168,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -213,19 +214,19 @@ test_ring_basic(void)
cur_dst = dst;

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1, NULL);
cur_src += 1;
if (ret == 0)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2, NULL);
cur_src += 2;
if (ret == 0)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -260,7 +261,7 @@ test_ring_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if (ret == 0)
goto fail;
@@ -290,13 +291,13 @@ test_ring_basic(void)
cur_src = src;
cur_dst = dst;

- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
goto fail;
}
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
cur_src += num_elems;
if (ret == 0) {
printf("Cannot enqueue\n");
@@ -371,19 +372,19 @@ test_ring_burst_basic(void)

printf("Test SP & SC basic functions \n");
printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -419,7 +420,7 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
goto fail;
@@ -427,14 +428,14 @@ test_ring_burst_basic(void)
}

printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
/* Always one free entry left */
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -444,7 +445,7 @@ test_ring_burst_basic(void)
goto fail;

printf("Test enqueue for a full entry \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;

@@ -486,19 +487,19 @@ test_ring_burst_basic(void)
printf("Test MP & MC basic functions \n");

printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
if ((ret & RTE_RING_SZ_MASK) != 1)
goto fail;

printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -534,7 +535,7 @@ test_ring_burst_basic(void)

printf("fill and empty the ring\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
@@ -557,19 +558,19 @@ test_ring_burst_basic(void)

printf("Test enqueue without enough memory space \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
goto fail;
}

/* Available memory space for the exact MAX_BULK objects */
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;

- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
goto fail;
@@ -607,7 +608,7 @@ test_ring_burst_basic(void)

printf("Covering rte_ring_enqueue_burst functions \n");

- ret = rte_ring_enqueue_burst(r, cur_src, 2);
+ ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
if ((ret & RTE_RING_SZ_MASK) != 2)
goto fail;
@@ -746,7 +747,7 @@ test_ring_basic_ex(void)
}

/* Covering the ring burst operation */
- ret = rte_ring_enqueue_burst(rp, obj, 2);
+ ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
if ((ret & RTE_RING_SZ_MASK) != 2) {
printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
goto fail_test;
diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 8ccbdef..f95a8e9 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)

const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();

const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) == 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();

@@ -323,14 +323,16 @@ test_burst_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
@@ -357,14 +359,16 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t sc_end = rte_rdtsc();

const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
}
const uint64_t mc_end = rte_rdtsc();
diff --git a/test/test/test_table_ports.c b/test/test/test_table_ports.c
index 2532367..395f4f3 100644
--- a/test/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -80,7 +80,7 @@ test_port_ring_reader(void)
mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- mbuf, 1);
+ mbuf, 1, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

if (received_pkts < expected_pkts)
@@ -93,7 +93,7 @@ test_port_ring_reader(void)
mbuf[i] = rte_pktmbuf_alloc(pool);

expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
RTE_PORT_IN_BURST_SIZE_MAX);

diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 6e4dcd8..39e070c 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -380,7 +380,7 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
nb_pkts = 0;
else
nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
- nb_pkts);
+ nb_pkts, NULL);

/* increment opacket count */
dev_private->eth_stats.opackets += nb_pkts;
@@ -496,7 +496,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
vrtl_eth_dev->data->dev_private;

return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}

int
--
2.9.3