diff --git a/Modules/_queuemodule.c b/Modules/_queuemodule.c
index d43665597e9d421..7bac461e98a50b6 100644
--- a/Modules/_queuemodule.c
+++ b/Modules/_queuemodule.c
@@ -85,9 +85,24 @@ RingBuf_Fini(RingBuf *buf)
     PyMem_Free(items);
 }
 
-static void
-coalesce_items(RingBuf *buf, PyObject **new_items, Py_ssize_t new_capacity)
+// Resize the underlying items array of buf to the new capacity and arrange
+// the items contiguously in the new items array.
+//
+// Returns -1 on allocation failure or 0 on success.
+static int
+resize_ringbuf(RingBuf *buf, Py_ssize_t capacity)
 {
+    Py_ssize_t new_capacity = Py_MAX(INITIAL_RING_BUF_CAPACITY, capacity);
+    if (new_capacity == buf->items_cap) {
+        return 0;
+    }
+    assert(buf->num_items <= new_capacity);
+
+    PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
+    if (new_items == NULL) {
+        return -1;
+    }
+
     // Copy the "tail" of the old items array. This corresponds to "head" of
     // the abstract ring buffer.
     Py_ssize_t tail_size = Py_MIN(buf->num_items, buf->items_cap - buf->get_idx);
@@ -107,26 +122,8 @@ coalesce_items(RingBuf *buf, PyObject **new_items, Py_ssize_t new_capacity)
     buf->items_cap = new_capacity;
     buf->get_idx = 0;
     buf->put_idx = buf->num_items;
-}
-
-static void
-shrink_ringbuf(RingBuf *buf)
-{
-    Py_ssize_t new_capacity =
-        Py_MAX(INITIAL_RING_BUF_CAPACITY, buf->items_cap / 2);
-    assert(new_capacity >= buf->num_items);
-    if (new_capacity == buf->items_cap) {
-        return;
-    }
 
-    PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
-    if (new_items == NULL) {
-        // It's safe to ignore the failure; shrinking is an optimization and
-        // isn't required for correctness.
-        return;
-    }
-
-    coalesce_items(buf, new_items, new_capacity);
+    return 0;
 }
 
 // Returns an owned reference
@@ -135,9 +132,14 @@ RingBuf_Get(RingBuf *buf)
 {
     assert(buf->num_items > 0);
 
-    if (buf->num_items < (buf->items_cap / 2)) {
-        // Items is less than 50% occupied, shrink it
-        shrink_ringbuf(buf);
+    if (buf->num_items < (buf->items_cap / 4)) {
+        // Items is less than 25% occupied, shrink it by 50%. This allows for
+        // growth without immediately needing to resize the underlying items
+        // array.
+        //
+        // It's safe to ignore allocation failures here; shrinking is an
+        // optimization that isn't required for correctness.
+        resize_ringbuf(buf, buf->items_cap / 2);
     }
 
     PyObject *item = buf->items[buf->get_idx];
@@ -147,27 +149,15 @@ RingBuf_Get(RingBuf *buf)
     return item;
 }
 
-static int
-grow_ringbuf(RingBuf *buf)
-{
-    Py_ssize_t new_capacity =
-        Py_MAX(INITIAL_RING_BUF_CAPACITY, buf->items_cap * 2);
-    PyObject **new_items = PyMem_Calloc(new_capacity, sizeof(PyObject *));
-    if (new_items == NULL) {
-        PyErr_NoMemory();
-        return -1;
-    }
-    coalesce_items(buf, new_items, new_capacity);
-    return 0;
-}
-
 // Returns 0 on success or -1 if the buffer failed to grow.
 // Steals a reference to item.
 static int
 RingBuf_Put(RingBuf *buf, PyObject *item)
 {
     if (buf->num_items == buf->items_cap) {
-        if (grow_ringbuf(buf) < 0) {
+        // Buffer is full, grow it.
+        if (resize_ringbuf(buf, buf->items_cap * 2) < 0) {
+            PyErr_NoMemory();
             return -1;
         }
     }
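
For illustration only, not part of the patch: a minimal standalone sketch of the resize technique the diff applies, using a plain int ring buffer so it compiles on its own. The IntRingBuf type, resize_intringbuf, and the demo in main are hypothetical names invented here; the copy logic mirrors what the patch's resize_ringbuf does: allocate a new array, copy the logical contents contiguously (the tail segment from get_idx to the end of the old array, then the wrapped head segment), and reset get_idx/put_idx.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    int *items;           // backing array
    ptrdiff_t items_cap;  // capacity of items
    ptrdiff_t num_items;  // number of stored items
    ptrdiff_t get_idx;    // index of the oldest item
    ptrdiff_t put_idx;    // index where the next item will be stored
} IntRingBuf;

// Reallocate the backing array to new_capacity and lay the stored items out
// contiguously starting at index 0.
// Returns -1 on allocation failure or 0 on success.
static int
resize_intringbuf(IntRingBuf *buf, ptrdiff_t new_capacity)
{
    assert(buf->num_items <= new_capacity);

    int *new_items = calloc((size_t)new_capacity, sizeof(int));
    if (new_items == NULL) {
        return -1;
    }

    // "Tail" of the old array: the run from get_idx up to the end of the
    // array. It holds the oldest items, so it goes first in the new array.
    ptrdiff_t run = buf->items_cap - buf->get_idx;
    ptrdiff_t tail_size = buf->num_items < run ? buf->num_items : run;
    if (tail_size > 0) {
        memcpy(new_items, buf->items + buf->get_idx,
               (size_t)tail_size * sizeof(int));
    }

    // "Head" of the old array: the wrapped-around items at the start of the
    // array, copied right after the tail.
    ptrdiff_t head_size = buf->num_items - tail_size;
    if (head_size > 0) {
        memcpy(new_items + tail_size, buf->items,
               (size_t)head_size * sizeof(int));
    }

    free(buf->items);
    buf->items = new_items;
    buf->items_cap = new_capacity;
    buf->get_idx = 0;
    buf->put_idx = buf->num_items;
    return 0;
}

int
main(void)
{
    // Buffer of capacity 4 holding 3 items that wrap around the end:
    // physical layout {30, _, 10, 20}, logical order 10, 20, 30.
    int initial[4] = {30, 0, 10, 20};
    IntRingBuf buf = {
        .items = malloc(sizeof(initial)),
        .items_cap = 4,
        .num_items = 3,
        .get_idx = 2,
        .put_idx = 1,
    };
    if (buf.items == NULL) {
        return 1;
    }
    memcpy(buf.items, initial, sizeof(initial));

    if (resize_intringbuf(&buf, 8) < 0) {
        free(buf.items);
        return 1;
    }

    // Items are now contiguous: prints "10 20 30"; get_idx == 0, put_idx == 3.
    for (ptrdiff_t i = 0; i < buf.num_items; i++) {
        printf("%d ", buf.items[i]);
    }
    printf("\n");

    free(buf.items);
    return 0;
}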