Skip to content

Commit

Permalink
Use a lower threshold for shrinking the ring buffer
Browse files Browse the repository at this point in the history
Shrink by 50% when the buffer is < 25% occupied. This provides room
for growth after shrinking without having to immediately grow the
ring buffer. For example, if we were to shrink by 50% when the buffer
was < 50% occupied, adding a new item after shrinking would cause the
buffer to grow immediately. If the number of items in the deque oscillates
around 50% occupancy we would end up with accidentally quadratic behavior;
there would be a high chance that each `put` and `get` operation would
cause the ring buffer to be resized.
  • Loading branch information
mpage committed Jan 18, 2024
1 parent bc03b85 commit 37b45d2
Showing 1 changed file with 29 additions and 39 deletions.
68 changes: 29 additions & 39 deletions Modules/_queuemodule.c
Original file line number Diff line number Diff line change
Expand Up @@ -85,9 +85,24 @@ RingBuf_Fini(RingBuf *buf)
PyMem_Free(items);
}

static void
coalesce_items(RingBuf *buf, PyObject **new_items, Py_ssize_t new_capacity)
// Resize the underlying items array of buf to the new capacity and arrange
// the items contiguously in the new items array.
//
// Returns -1 on allocation failure or 0 on success.
static int
resize_ringbuf(RingBuf *buf, Py_ssize_t capacity)
{
Py_ssize_t new_capacity = Py_MAX(INITIAL_RING_BUF_CAPACITY, capacity);
if (new_capacity == buf->items_cap) {
return;
}
assert(buf->num_items <= new_capacity);

PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
if (new_items == NULL) {
return -1;
}

// Copy the "tail" of the old items array. This corresponds to "head" of
// the abstract ring buffer.
Py_ssize_t tail_size = Py_MIN(buf->num_items, buf->items_cap - buf->get_idx);
Expand All @@ -107,26 +122,8 @@ coalesce_items(RingBuf *buf, PyObject **new_items, Py_ssize_t new_capacity)
buf->items_cap = new_capacity;
buf->get_idx = 0;
buf->put_idx = buf->num_items;
}

static void
shrink_ringbuf(RingBuf *buf)
{
Py_ssize_t new_capacity =
Py_MAX(INITIAL_RING_BUF_CAPACITY, buf->items_cap / 2);
assert(new_capacity >= buf->num_items);
if (new_capacity == buf->items_cap) {
return;
}

PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
if (new_items == NULL) {
// It's safe to ignore the failure; shrinking is an optimization and
// isn't required for correctness.
return;
}

coalesce_items(buf, new_items, new_capacity);
return 0;
}

// Returns an owned reference
Expand All @@ -135,9 +132,14 @@ RingBuf_Get(RingBuf *buf)
{
assert(buf->num_items > 0);

if (buf->num_items < (buf->items_cap / 2)) {
// Items is less than 50% occupied, shrink it
shrink_ringbuf(buf);
if (buf->num_items < (buf->items_cap / 4)) {
// Items is less than 25% occupied, shrink it by 50%. This allows for
// growth without immediately needing to resize the underlying items
// array.
//
// It's safe to ignore allocation failures here; shrinking is an
// optimization that isn't required for correctness.
resize_ringbuf(buf, buf->items_cap / 2);
}

PyObject *item = buf->items[buf->get_idx];
Expand All @@ -147,27 +149,15 @@ RingBuf_Get(RingBuf *buf)
return item;
}

static int
grow_ringbuf(RingBuf *buf)
{
Py_ssize_t new_capacity =
Py_MAX(INITIAL_RING_BUF_CAPACITY, buf->items_cap * 2);
PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
if (new_items == NULL) {
PyErr_NoMemory();
return -1;
}
coalesce_items(buf, new_items, new_capacity);
return 0;
}

// Returns 0 on success or -1 if the buffer failed to grow.
// Steals a reference to item.
static int
RingBuf_Put(RingBuf *buf, PyObject *item)
{
if (buf->num_items == buf->items_cap) {
if (grow_ringbuf(buf) < 0) {
// Buffer is full, grow it.
if (resize_ringbuf(buf, buf->items_cap * 2) < 0) {
PyErr_NoMemory();
return -1;
}
}
Expand Down

0 comments on commit 37b45d2

Please sign in to comment.