Skip to content
This repository was archived by the owner on May 25, 2023. It is now read-only.

Commit

Permalink
iobufpool uses *[]byte instead of []byte to reduce allocations
Browse files Browse the repository at this point in the history
  • Loading branch information
jackc committed Jan 28, 2023
1 parent bc75429 commit eee854f
Show file tree
Hide file tree
Showing 5 changed files with 48 additions and 35 deletions.
25 changes: 19 additions & 6 deletions internal/iobufpool/iobufpool.go
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
// Package iobufpool implements a global segregated-fit pool of buffers for IO.
//
// It uses *[]byte instead of []byte to avoid the extra allocation sync.Pool incurs when a non-pointer
// value is passed to Put. Unfortunately, the fact that passing a pointer avoids this allocation is
// deliberately left undocumented. https://github.com/golang/go/issues/16323
package iobufpool

import "sync"
Expand All @@ -10,17 +13,27 @@ var pools [18]*sync.Pool
// init populates every size-class pool. Pool i hands out buffers of
// 1<<(minPoolExpOf2+i) bytes.
func init() {
	for i := range pools {
		// bufLen is declared inside the loop so each New closure captures
		// its own size class.
		bufLen := 1 << (minPoolExpOf2 + i)
		pools[i] = &sync.Pool{
			// New returns *[]byte rather than []byte so that Put does not
			// allocate when boxing the slice header into an interface.
			New: func() any {
				buf := make([]byte, bufLen)
				return &buf
			},
		}
	}
}

// Get gets a []byte of len size with cap <= size*2.
func Get(size int) []byte {
func Get(size int) *[]byte {
i := getPoolIdx(size)
if i >= len(pools) {
return make([]byte, size)
buf := make([]byte, size)
return &buf
}
return pools[i].Get().([]byte)[:size]

ptrBuf := (pools[i].Get().(*[]byte))
*ptrBuf = (*ptrBuf)[:size]

return ptrBuf
}

func getPoolIdx(size int) int {
Expand All @@ -36,8 +49,8 @@ func getPoolIdx(size int) int {
}

// Put returns buf to the pool.
func Put(buf []byte) {
i := putPoolIdx(cap(buf))
func Put(buf *[]byte) {
i := putPoolIdx(cap(*buf))
if i < 0 {
return
}
Expand Down
12 changes: 6 additions & 6 deletions internal/nbconn/bufferqueue.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,11 @@ const minBufferQueueLen = 8

type bufferQueue struct {
lock sync.Mutex
queue [][]byte
queue []*[]byte
r, w int
}

func (bq *bufferQueue) pushBack(buf []byte) {
func (bq *bufferQueue) pushBack(buf *[]byte) {
bq.lock.Lock()
defer bq.lock.Unlock()

Expand All @@ -23,7 +23,7 @@ func (bq *bufferQueue) pushBack(buf []byte) {
bq.w++
}

func (bq *bufferQueue) pushFront(buf []byte) {
func (bq *bufferQueue) pushFront(buf *[]byte) {
bq.lock.Lock()
defer bq.lock.Unlock()

Expand All @@ -35,7 +35,7 @@ func (bq *bufferQueue) pushFront(buf []byte) {
bq.w++
}

func (bq *bufferQueue) popFront() []byte {
func (bq *bufferQueue) popFront() *[]byte {
bq.lock.Lock()
defer bq.lock.Unlock()

Expand All @@ -51,7 +51,7 @@ func (bq *bufferQueue) popFront() []byte {
bq.r = 0
bq.w = 0
if len(bq.queue) > minBufferQueueLen {
bq.queue = make([][]byte, minBufferQueueLen)
bq.queue = make([]*[]byte, minBufferQueueLen)
}
}

Expand All @@ -64,7 +64,7 @@ func (bq *bufferQueue) growQueue() {
desiredLen = minBufferQueueLen
}

newQueue := make([][]byte, desiredLen)
newQueue := make([]*[]byte, desiredLen)
copy(newQueue, bq.queue)
bq.queue = newQueue
}
18 changes: 9 additions & 9 deletions internal/nbconn/nbconn.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,9 @@ func (c *NetConn) Read(b []byte) (n int, err error) {
if buf == nil {
break
}
copiedN := copy(b[n:], buf)
if copiedN < len(buf) {
buf = buf[copiedN:]
copiedN := copy(b[n:], *buf)
if copiedN < len(*buf) {
*buf = (*buf)[copiedN:]
c.readQueue.pushFront(buf)
} else {
iobufpool.Put(buf)
Expand Down Expand Up @@ -168,7 +168,7 @@ func (c *NetConn) Write(b []byte) (n int, err error) {
}

buf := iobufpool.Get(len(b))
copy(buf, b)
copy(*buf, b)
c.writeQueue.pushBack(buf)
return len(b), nil
}
Expand Down Expand Up @@ -286,14 +286,14 @@ func (c *NetConn) flush() error {
}()

for buf := c.writeQueue.popFront(); buf != nil; buf = c.writeQueue.popFront() {
remainingBuf := buf
remainingBuf := *buf
for len(remainingBuf) > 0 {
n, err := c.nonblockingWrite(remainingBuf)
remainingBuf = remainingBuf[n:]
if err != nil {
if !errors.Is(err, ErrWouldBlock) {
buf = buf[:len(remainingBuf)]
copy(buf, remainingBuf)
*buf = (*buf)[:len(remainingBuf)]
copy(*buf, remainingBuf)
c.writeQueue.pushFront(buf)
return err
}
Expand Down Expand Up @@ -321,9 +321,9 @@ func (c *NetConn) flush() error {
func (c *NetConn) BufferReadUntilBlock() error {
for {
buf := iobufpool.Get(8 * 1024)
n, err := c.nonblockingRead(buf)
n, err := c.nonblockingRead(*buf)
if n > 0 {
buf = buf[:n]
*buf = (*buf)[:n]
c.readQueue.pushBack(buf)
} else if n == 0 {
iobufpool.Put(buf)
Expand Down
10 changes: 5 additions & 5 deletions pgconn/pgconn.go
Original file line number Diff line number Diff line change
Expand Up @@ -1175,20 +1175,20 @@ func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (Co

buf := iobufpool.Get(65536)
defer iobufpool.Put(buf)
buf[0] = 'd'
(*buf)[0] = 'd'

var readErr, pgErr error
for pgErr == nil {
// Read chunk from r.
var n int
n, readErr = r.Read(buf[5:cap(buf)])
n, readErr = r.Read((*buf)[5:cap(*buf)])

// Send chunk to PostgreSQL.
if n > 0 {
buf = buf[0 : n+5]
pgio.SetInt32(buf[1:], int32(n+4))
*buf = (*buf)[0 : n+5]
pgio.SetInt32((*buf)[1:], int32(n+4))

writeErr := pgConn.frontend.SendUnbufferedEncodedCopyData(buf)
writeErr := pgConn.frontend.SendUnbufferedEncodedCopyData(*buf)
if writeErr != nil {
pgConn.asyncClose()
return CommandTag{}, err
Expand Down
18 changes: 9 additions & 9 deletions pgproto3/chunkreader.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import (
type chunkReader struct {
r io.Reader

buf []byte
buf *[]byte
rp, wp int // buf read position and write position

minBufSize int
Expand Down Expand Up @@ -45,7 +45,7 @@ func newChunkReader(r io.Reader, minBufSize int) *chunkReader {
func (r *chunkReader) Next(n int) (buf []byte, err error) {
// Reset the buffer if it is empty
if r.rp == r.wp {
if len(r.buf) != r.minBufSize {
if len(*r.buf) != r.minBufSize {
iobufpool.Put(r.buf)
r.buf = iobufpool.Get(r.minBufSize)
}
Expand All @@ -55,36 +55,36 @@ func (r *chunkReader) Next(n int) (buf []byte, err error) {

// n bytes already in buf
if (r.wp - r.rp) >= n {
buf = r.buf[r.rp : r.rp+n : r.rp+n]
buf = (*r.buf)[r.rp : r.rp+n : r.rp+n]
r.rp += n
return buf, err
}

// buf is smaller than requested number of bytes
if len(r.buf) < n {
if len(*r.buf) < n {
bigBuf := iobufpool.Get(n)
r.wp = copy(bigBuf, r.buf[r.rp:r.wp])
r.wp = copy((*bigBuf), (*r.buf)[r.rp:r.wp])
r.rp = 0
iobufpool.Put(r.buf)
r.buf = bigBuf
}

// buf is large enough, but need to shift filled area to start to make enough contiguous space
minReadCount := n - (r.wp - r.rp)
if (len(r.buf) - r.wp) < minReadCount {
r.wp = copy(r.buf, r.buf[r.rp:r.wp])
if (len(*r.buf) - r.wp) < minReadCount {
r.wp = copy((*r.buf), (*r.buf)[r.rp:r.wp])
r.rp = 0
}

// Read at least the required number of bytes from the underlying io.Reader
readBytesCount, err := io.ReadAtLeast(r.r, r.buf[r.wp:], minReadCount)
readBytesCount, err := io.ReadAtLeast(r.r, (*r.buf)[r.wp:], minReadCount)
r.wp += readBytesCount
// fmt.Println("read", n)
if err != nil {
return nil, err
}

buf = r.buf[r.rp : r.rp+n : r.rp+n]
buf = (*r.buf)[r.rp : r.rp+n : r.rp+n]
r.rp += n
return buf, nil
}

0 comments on commit eee854f

Please sign in to comment.