
Commit

Merge branch 'master' into compression
methane committed Dec 1, 2024
2 parents 243b3df + 575e1b2 commit a82ca05
Showing 20 changed files with 286 additions and 274 deletions.
16 changes: 8 additions & 8 deletions .github/workflows/test.yml
@@ -15,9 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dominikh/[email protected]
with:
version: "2023.1.6"
- uses: dominikh/[email protected]

list:
runs-on: ubuntu-latest
@@ -31,20 +29,22 @@ jobs:
import os
go = [
# Keep the most recent production release at the top
'1.22',
'1.23',
# Older production releases
'1.22',
'1.21',
'1.20',
]
mysql = [
'9.0',
'8.4', # LTS
'8.0',
'8.3',
'5.7',
'mariadb-11.3',
'mariadb-11.4', # LTS
'mariadb-11.2',
'mariadb-11.1',
'mariadb-10.11', # LTS
'mariadb-10.6', # LTS
'mariadb-10.5',
'mariadb-10.5', # LTS
]
includes = []
3 changes: 3 additions & 0 deletions AUTHORS
@@ -34,6 +34,7 @@ Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
Dave Protasowski <dprotaso at gmail.com>
Dirkjan Bussink <d.bussink at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
Erwan Martin <hello at erwan.io>
@@ -51,6 +52,7 @@ ICHINOSE Shogo <shogo82148 at gmail.com>
Ilia Cimpoes <ichimpoesh at gmail.com>
INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
Jakub Adamus <kratky at zobak.cz>
James Harr <james.harr at gmail.com>
Janek Vedock <janekvedock at comcast.net>
Jason Ng <oblitorum at gmail.com>
@@ -82,6 +84,7 @@ Lunny Xiao <xiaolunwen at gmail.com>
Luke Scott <luke at webconnex.com>
Maciej Zimnoch <maciej.zimnoch at codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nao Yokotsuka <yokotukanao at gmail.com>
Nathanial Murphy <nathanial.murphy at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Oliver Bone <owbone at github.com>
5 changes: 4 additions & 1 deletion README.md
@@ -42,7 +42,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac

## Requirements

* Go 1.20 or higher. We aim to support the 3 latest versions of Go.
* Go 1.21 or higher. We aim to support the 3 latest versions of Go.
* MySQL (5.7+) and MariaDB (10.5+) are supported.
* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
* Do not ask questions about TiDB in our issue tracker or forum.
@@ -539,6 +539,9 @@ This driver supports the [`ColumnType` interface](https://golang.org/pkg/databas
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.

> [!IMPORTANT]
> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver.
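
For illustration, here is a minimal sketch of running a query under a deadline; the DSN, credentials, and query are hypothetical, but it shows the behaviour the note describes: once the context is cancelled or expires, the driver abandons and closes the underlying connection instead of returning it to the pool.

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Hypothetical DSN; replace user, password, and database name.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If the query does not finish within 2 seconds, the context expires,
	// the call returns an error, and the connection used for the query is closed.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err) // context.DeadlineExceeded surfaces here on timeout
	}
	log.Println("server time:", now)
}
```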

### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
115 changes: 45 additions & 70 deletions buffer.go
@@ -22,42 +22,30 @@ const maxCachedBufSize = 256 * 1024
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
// This buffer is backed by two byte slices in a double-buffering scheme
type buffer struct {
buf []byte // buf is a byte buffer whose length and capacity are equal.
nc net.Conn
idx int
length int
timeout time.Duration
dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
flipcnt uint // flipcnt is the current buffer counter for double-buffering
buf []byte // read buffer.
cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
nc net.Conn
timeout time.Duration
}

// newBuffer allocates and returns a new buffer.
func newBuffer(nc net.Conn) buffer {
fg := make([]byte, defaultBufSize)
return buffer{
buf: fg,
nc: nc,
dbuf: [2][]byte{fg, nil},
cachedBuf: make([]byte, defaultBufSize),
nc: nc,
}
}

// flip replaces the active buffer with the background buffer
// this is a delayed flip that simply increases the buffer counter;
// the actual flip will be performed the next time we call `buffer.fill`
func (b *buffer) flip() {
b.flipcnt += 1
// busy returns true if the read buffer is not empty.
func (b *buffer) busy() bool {
return len(b.buf) > 0
}

// fill reads into the buffer until at least _need_ bytes are in it
// fill reads into the read buffer until at least _need_ bytes are in it.
func (b *buffer) fill(need int) error {
n := b.length
// fill data into its double-buffering target: if we've called
// flip on this buffer, we'll be copying to the background buffer,
// and then filling it with network data; otherwise we'll just move
// the contents of the current buffer to the front before filling it
dest := b.dbuf[b.flipcnt&1]
// we'll move the contents of the current buffer to dest before filling it.
dest := b.cachedBuf

// grow buffer if necessary to fit the whole packet.
if need > len(dest) {
@@ -67,18 +55,13 @@ func (b *buffer) fill(need int) error {
// if the allocated buffer is not too large, move it to backing storage
// to prevent extra allocations on applications that perform large reads
if len(dest) <= maxCachedBufSize {
b.dbuf[b.flipcnt&1] = dest
b.cachedBuf = dest
}
}

// if we're filling the fg buffer, move the existing data to the start of it.
// if we're filling the bg buffer, copy over the data
if n > 0 {
copy(dest[:n], b.buf[b.idx:])
}

b.buf = dest
b.idx = 0
// move the existing data to the start of the buffer.
n := len(b.buf)
copy(dest[:n], b.buf)

for {
if b.timeout > 0 {
@@ -87,63 +70,58 @@ func (b *buffer) fill(need int) error {
}
}

nn, err := b.nc.Read(b.buf[n:])
nn, err := b.nc.Read(dest[n:])
n += nn

switch err {
case nil:
if n < need {
continue
}
b.length = n
return nil
if err == nil && n < need {
continue
}

case io.EOF:
if n >= need {
b.length = n
return nil
}
return io.ErrUnexpectedEOF
b.buf = dest[:n]

default:
return err
if err == io.EOF {
if n < need {
err = io.ErrUnexpectedEOF
} else {
err = nil
}
}
return err
}
}

// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
func (b *buffer) readNext(need int) ([]byte, error) {
if b.length < need {
if len(b.buf) < need {
// refill
if err := b.fill(need); err != nil {
return nil, err
}
}

offset := b.idx
b.idx += need
b.length -= need
return b.buf[offset:b.idx], nil
data := b.buf[:need]
b.buf = b.buf[need:]
return data, nil
}

// takeBuffer returns a buffer with the requested size.
// If possible, a slice from the existing buffer is returned.
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}

// test (cheap) general case first
if length <= cap(b.buf) {
return b.buf[:length], nil
if length <= len(b.cachedBuf) {
return b.cachedBuf[:length], nil
}

if length < maxPacketSize {
b.buf = make([]byte, length)
return b.buf, nil
if length < maxCachedBufSize {
b.cachedBuf = make([]byte, length)
return b.cachedBuf, nil
}

// buffer is larger than we want to store.
@@ -154,29 +132,26 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
// known to be smaller than defaultBufSize.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
return b.buf[:length], nil
return b.cachedBuf[:length], nil
}

// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// cap and len of the returned buffer will be equal.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
return b.buf, nil
return b.cachedBuf, nil
}

// store stores buf, an updated buffer, if it's suitable to do so.
func (b *buffer) store(buf []byte) error {
if b.length > 0 {
return ErrBusyBuffer
} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
b.buf = buf[:cap(buf)]
func (b *buffer) store(buf []byte) {
if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
b.cachedBuf = buf[:cap(buf)]
}
return nil
}
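
To make the new single cached buffer contract concrete, below is a small standalone sketch (hypothetical names, not the driver's actual API) of the same pattern: unread bytes live in `buf` and are consumed from the front by `readNext`, `fill` moves any pending bytes to the start of the cached array before reading more from the connection, and the cached array is only handed out for writes while no read data is pending.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// readBuffer mirrors the scheme in buffer.go: buf holds the unread bytes,
// cached is the reusable backing array that fill() copies into.
type readBuffer struct {
	r      io.Reader
	buf    []byte // unread bytes; empty when nothing is pending
	cached []byte // reusable backing storage
}

var errBusy = errors.New("busy buffer: unread data pending")

// fill reads until at least need unread bytes are buffered.
func (b *readBuffer) fill(need int) error {
	dest := b.cached
	if need > len(dest) {
		// The real driver grows the buffer and only keeps it cached below a
		// size limit; this sketch simply allocates what is needed.
		dest = make([]byte, need)
		b.cached = dest
	}
	n := copy(dest, b.buf) // move pending bytes to the front

	var err error
	for n < need && err == nil {
		var nn int
		nn, err = b.r.Read(dest[n:])
		n += nn
	}
	b.buf = dest[:n]
	if err == io.EOF {
		if n < need {
			return io.ErrUnexpectedEOF
		}
		return nil
	}
	return err
}

// readNext returns the next need bytes; the slice is only valid until the next fill.
func (b *readBuffer) readNext(need int) ([]byte, error) {
	if len(b.buf) < need {
		if err := b.fill(need); err != nil {
			return nil, err
		}
	}
	data := b.buf[:need]
	b.buf = b.buf[need:]
	return data, nil
}

// takeBuffer hands out the cached array for writing, but only while no
// unread data is pending (the "only one buffer at a time" rule).
func (b *readBuffer) takeBuffer(length int) ([]byte, error) {
	if len(b.buf) > 0 {
		return nil, errBusy
	}
	if length <= len(b.cached) {
		return b.cached[:length], nil
	}
	return make([]byte, length), nil
}

func main() {
	b := &readBuffer{r: bytes.NewReader([]byte("hello world")), cached: make([]byte, 8)}
	head, _ := b.readNext(5)
	fmt.Printf("%s\n", head) // hello
	rest, _ := b.readNext(6)
	fmt.Printf("%q\n", rest) // " world"
}
```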
2 changes: 1 addition & 1 deletion collations.go
@@ -8,7 +8,7 @@

package mysql

const defaultCollation = "utf8mb4_general_ci"
const defaultCollationID = 45 // utf8mb4_general_ci
const binaryCollationID = 63

// A list of available collations mapped to the internal ID.
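
As a rough illustration of the switch from a name constant to an ID constant: the protocol carries a numeric collation ID, so a collation name (for example from the DSN) has to be resolved through the name-to-ID table. The sketch below is hypothetical, with a two-entry stand-in map and a fallback to the default ID for unknown names; the driver's real table and error handling live in collations.go.

```go
package main

import "fmt"

const (
	defaultCollationID = 45 // utf8mb4_general_ci
	binaryCollationID  = 63 // binary
)

// collationIDs is a hypothetical two-entry stand-in for the full
// name-to-ID table kept in collations.go.
var collationIDs = map[string]uint8{
	"utf8mb4_general_ci": defaultCollationID,
	"binary":             binaryCollationID,
}

// resolveCollationID returns the numeric ID to put on the wire,
// falling back to the default for names this sketch does not know.
func resolveCollationID(name string) uint8 {
	if id, ok := collationIDs[name]; ok {
		return id
	}
	return defaultCollationID
}

func main() {
	fmt.Println(resolveCollationID("binary"))             // 63
	fmt.Println(resolveCollationID("utf8mb4_0900_ai_ci")) // unknown in this sketch, falls back to 45
}
```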