diff --git a/exp/mdbxpool/txnpool.go b/exp/mdbxpool/txnpool.go
index 5d97c19..81f831d 100644
--- a/exp/mdbxpool/txnpool.go
+++ b/exp/mdbxpool/txnpool.go
@@ -57,8 +57,8 @@ type TxnPool struct {
 	// used concurrently.
 	UpdateHandling UpdateHandling
 
-	lastid    uintptr
-	idleGuard uintptr
+	lastid    uint64
+	idleGuard uint64
 }
 
 // NewTxnPool initializes returns a new TxnPool.
@@ -187,8 +187,8 @@ func (p *TxnPool) handleReadonly(txn *mdbx.Txn, condition UpdateHandling) (renew
 // getLastID safely retrieves the value of p.lastid so routines operating on
 // the sync.Pool know if a transaction can continue to be used without bloating
 // the database.
-func (p *TxnPool) getLastID() uintptr {
-	return atomic.LoadUintptr(&p.lastid)
+func (p *TxnPool) getLastID() uint64 {
+	return atomic.LoadUint64(&p.lastid)
 }
 
 // CommitID sets the TxnPool's last-known transaction id to invalidate
@@ -196,7 +196,7 @@ func (p *TxnPool) getLastID() uintptr {
 //
 // CommitID should only be called if p is not used to create/commit update
 // transactions.
-func (p *TxnPool) CommitID(id uintptr) {
+func (p *TxnPool) CommitID(id uint64) {
 	if !p.handlesUpdates() {
 		return
 	}
@@ -205,13 +205,13 @@ func (p *TxnPool) CommitID(id uintptr) {
 
 	// As long as we think we are holding a newer id than lastid we keep trying
 	// to CAS until we see a newer id or the CAS succeeds.
-	lastid := atomic.LoadUintptr(&p.lastid)
+	lastid := atomic.LoadUint64(&p.lastid)
 	for lastid < id {
-		if atomic.CompareAndSwapUintptr(&p.lastid, lastid, id) {
+		if atomic.CompareAndSwapUint64(&p.lastid, lastid, id) {
 			updated = true
 			break
 		}
-		lastid = atomic.LoadUintptr(&p.lastid)
+		lastid = atomic.LoadUint64(&p.lastid)
 	}
 
 	if updated && p.UpdateHandling&HandleIdle != 0 {
@@ -239,12 +239,12 @@ func (p *TxnPool) handleIdle() {
 	// one will probably do the work of the waiting one. So we just attempt to
 	// CAS a guarding value and continue if the we succeeded (ensuring that we
 	// reset the value with defer).
-	if !atomic.CompareAndSwapUintptr(&p.idleGuard, 0, 1) {
+	if !atomic.CompareAndSwapUint64(&p.idleGuard, 0, 1) {
 		return
 	}
 	// Don't CAS when we reset. Just reset. It will make sure that handleIdle
 	// can run again.
-	defer atomic.StoreUintptr(&p.idleGuard, 0)
+	defer atomic.StoreUint64(&p.idleGuard, 0)
 
 	var txnPutBack *mdbx.Txn
 	for {
@@ -301,7 +301,7 @@ func (p *TxnPool) Abort(txn *mdbx.Txn) {
 
 // Update is analogous to the Update method on mdbx.Env.
 func (p *TxnPool) Update(fn mdbx.TxnOp) error {
-	var id uintptr
+	var id uint64
 	err := p.env.Update(func(txn *mdbx.Txn) (err error) {
 		err = fn(txn)
 		if err != nil {
diff --git a/mdbx/txn.go b/mdbx/txn.go
index 249a3e9..3fd253c 100644
--- a/mdbx/txn.go
+++ b/mdbx/txn.go
@@ -73,7 +73,7 @@ type Txn struct {
 	// The value of Txn.ID() is cached so that the cost of cgo does not have to
 	// be paid. The id of a Txn cannot change over its life, even if it is
 	// reset/renewed
-	id uintptr
+	id uint64
 }
 
 // beginTxn does not lock the OS thread which is a prerequisite for creating a
@@ -118,7 +118,7 @@ func beginTxn(env *Env, parent *Txn, flags uint) (*Txn, error) {
 // view transactions.
 //
 // See mdbx_txn_id.
-func (txn *Txn) ID() uintptr {
+func (txn *Txn) ID() uint64 {
 	// It is possible for a txn to legitimately have ID 0 if it a readonly txn
 	// created before any updates. In practice this does not really happen
 	// because an application typically must do an initial update to initialize
@@ -131,8 +131,8 @@ func (txn *Txn) ID() uintptr {
 	return txn.id
 }
 
-func (txn *Txn) getID() uintptr {
-	return uintptr(C.mdbx_txn_id(txn._txn))
+func (txn *Txn) getID() uint64 {
+	return uint64(C.mdbx_txn_id(txn._txn))
 }
 
 // RunOp executes fn with txn as an argument. During the execution of fn no
diff --git a/mdbx/txn_test.go b/mdbx/txn_test.go
index e33cc19..058ad33 100644
--- a/mdbx/txn_test.go
+++ b/mdbx/txn_test.go
@@ -13,7 +13,7 @@ import (
 func TestTxn_ID(t *testing.T) {
 	env := setup(t)
 
-	var id0, id1, id2, id3 uintptr
+	var id0, id1, id2, id3 uint64
 	var txnInvalid *Txn
 	err := env.View(func(txn *Txn) (err error) {
 		id0 = txn.ID()
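
Usage sketch (not part of the patch): a minimal example of how the changed signatures fit together when updates bypass the pool. The module import path and the bumpPool helper are assumptions for illustration; TxnPool.CommitID, Env.Update, and Txn.ID come from the code above and now take or return uint64 ids.

// Package example is a sketch only; the import path below is assumed.
package example

import (
	"github.com/torquem-ch/mdbx-go/exp/mdbxpool" // import path assumed
	"github.com/torquem-ch/mdbx-go/mdbx"
)

// bumpPool (hypothetical helper) performs an update directly on env, bypassing
// the pool, then reports the committed transaction id to the pool so that
// stale readonly transactions held in its sync.Pool can be invalidated.
// Transaction ids are uint64 after this change, matching the 64-bit value
// returned by C.mdbx_txn_id.
func bumpPool(env *mdbx.Env, pool *mdbxpool.TxnPool, put mdbx.TxnOp) error {
	var id uint64
	err := env.Update(func(txn *mdbx.Txn) error {
		if err := put(txn); err != nil {
			return err
		}
		id = txn.ID() // cached on the Txn; no extra cgo call
		return nil
	})
	if err != nil {
		return err
	}
	// Per the doc comment on CommitID, it should only be called when the pool
	// itself is not used to create/commit update transactions, as here.
	pool.CommitID(id)
	return nil
}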