meta: add SingleDelete to metamorphic test
sumeerbhola committed Mar 8, 2021
1 parent 070a80f commit 3c37882
Showing 5 changed files with 189 additions and 69 deletions.
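This commit teaches the metamorphic test generator to issue Pebble's SingleDelete, which (per the tracking comments in the diff below) is only usable for keys that have been set exactly once; the new bookkeeping exists to preserve that invariant across batches, commits, applies, and ingests. For reference, here is a minimal standalone sketch of the operation being exercised; the Open/Set/SingleDelete/Get calls are the public Pebble API, but the snippet itself is illustrative and not part of the commit.

// Minimal standalone sketch (not part of this commit) of the operation the
// metamorphic test now exercises, using the public Pebble API.
package main

import (
	"fmt"
	"log"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/vfs"
)

func main() {
	db, err := pebble.Open("", &pebble.Options{FS: vfs.NewMem()})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	key := []byte("set-once-key")
	// The key is written exactly once, so SingleDelete is a valid way to
	// remove it.
	if err := db.Set(key, []byte("value"), pebble.Sync); err != nil {
		log.Fatal(err)
	}
	if err := db.SingleDelete(key, pebble.Sync); err != nil {
		log.Fatal(err)
	}
	if _, closer, err := db.Get(key); err == pebble.ErrNotFound {
		fmt.Println("key removed")
	} else if err == nil {
		closer.Close()
	}
}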
58 changes: 30 additions & 28 deletions internal/metamorphic/config.go
@@ -36,6 +36,7 @@ const (
writerIngest
writerMerge
writerSet
writerSingleDelete
)

type config struct {
@@ -55,33 +56,34 @@ type config struct {
var defaultConfig = config{
// dbClose is not in this list since it is deterministically generated once, at the end of the test.
ops: []int{
batchAbort: 5,
batchCommit: 5,
dbCheckpoint: 1,
dbCompact: 1,
dbFlush: 2,
dbRestart: 2,
iterClose: 5,
iterFirst: 100,
iterLast: 100,
iterNext: 100,
iterPrev: 100,
iterSeekGE: 100,
iterSeekLT: 100,
iterSeekPrefixGE: 100,
iterSetBounds: 100,
newBatch: 5,
newIndexedBatch: 5,
newIter: 10,
newIterUsingClone: 5,
newSnapshot: 10,
readerGet: 100,
snapshotClose: 10,
writerApply: 10,
writerDelete: 100,
writerDeleteRange: 50,
writerIngest: 100,
writerMerge: 100,
writerSet: 100,
batchAbort: 5,
batchCommit: 5,
dbCheckpoint: 1,
dbCompact: 1,
dbFlush: 2,
dbRestart: 2,
iterClose: 5,
iterFirst: 100,
iterLast: 100,
iterNext: 100,
iterPrev: 100,
iterSeekGE: 100,
iterSeekLT: 100,
iterSeekPrefixGE: 100,
iterSetBounds: 100,
newBatch: 5,
newIndexedBatch: 5,
newIter: 10,
newIterUsingClone: 5,
newSnapshot: 10,
readerGet: 100,
snapshotClose: 10,
writerApply: 10,
writerDelete: 100,
writerDeleteRange: 50,
writerIngest: 100,
writerMerge: 100,
writerSet: 100,
writerSingleDelete: 25,
},
}
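The integers above are relative weights: the generator draws ops with the TPCC-style deck-of-cards randomization mentioned in generator.go below, so each op index is dealt in proportion to its weight and the deck is reshuffled once exhausted. The following is an illustrative sketch of that selection scheme under those assumptions; the package's actual deck helper is not shown in this diff.

// Illustrative sketch (an assumption, not the package's actual deck helper)
// of weight-proportional op selection with deck-style reshuffling.
package main

import (
	"fmt"
	"math/rand"
)

type deck struct {
	rng   *rand.Rand
	cards []int // one entry per draw, holding an op index
	next  int
}

func newDeck(rng *rand.Rand, weights []int) *deck {
	d := &deck{rng: rng}
	for op, w := range weights {
		for i := 0; i < w; i++ {
			d.cards = append(d.cards, op)
		}
	}
	d.shuffle()
	return d
}

func (d *deck) shuffle() {
	d.rng.Shuffle(len(d.cards), func(i, j int) { d.cards[i], d.cards[j] = d.cards[j], d.cards[i] })
	d.next = 0
}

// deal returns the next op index, reshuffling when the deck is exhausted, so
// op frequencies match the configured weights exactly over each full pass.
func (d *deck) deal() int {
	if d.next == len(d.cards) {
		d.shuffle()
	}
	op := d.cards[d.next]
	d.next++
	return op
}

func main() {
	weights := []int{5, 5, 1, 1, 2} // e.g. batchAbort, batchCommit, dbCheckpoint, dbCompact, dbFlush
	d := newDeck(rand.New(rand.NewSource(1)), weights)
	counts := make([]int, len(weights))
	for i := 0; i < 1400; i++ { // 100 full passes over a 14-card deck
		counts[d.deal()]++
	}
	fmt.Println(counts) // [500 500 100 100 200]
}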
151 changes: 110 additions & 41 deletions internal/metamorphic/generator.go
@@ -26,6 +26,12 @@ type generator struct {
// keys that have been set in the DB. Used to reuse already generated keys
// during random key selection.
keys [][]byte
// singleSetKeysInDB is also in the writerToSingleSetKeys map. This tracks
// keys that are eligible to be single deleted.
singleSetKeysInDB singleSetKeysForBatch
writerToSingleSetKeys map[objID]singleSetKeysForBatch
// Ensures no duplication of single set keys for the duration of the test.
generatedWriteKeys map[string]struct{}

// Unordered sets of object IDs for live objects. Used to randomly select on
// object when generating an operation. There are 4 concrete objects: the DB
@@ -65,15 +71,19 @@ type generator struct {

func newGenerator(rng *rand.Rand) *generator {
g := &generator{
rng: rng,
init: &initOp{},
liveReaders: objIDSlice{makeObjID(dbTag, 0)},
liveWriters: objIDSlice{makeObjID(dbTag, 0)},
batches: make(map[objID]objIDSet),
iters: make(map[objID]objIDSet),
readers: make(map[objID]objIDSet),
snapshots: make(map[objID]objIDSet),
}
rng: rng,
init: &initOp{},
singleSetKeysInDB: makeSingleSetKeysForBatch(),
writerToSingleSetKeys: make(map[objID]singleSetKeysForBatch),
generatedWriteKeys: make(map[string]struct{}),
liveReaders: objIDSlice{makeObjID(dbTag, 0)},
liveWriters: objIDSlice{makeObjID(dbTag, 0)},
batches: make(map[objID]objIDSet),
iters: make(map[objID]objIDSet),
readers: make(map[objID]objIDSet),
snapshots: make(map[objID]objIDSet),
}
g.writerToSingleSetKeys[makeObjID(dbTag, 0)] = g.singleSetKeysInDB
// Note that the initOp fields are populated during generation.
g.ops = append(g.ops, g.init)
return g
@@ -83,34 +93,35 @@ func generate(rng *rand.Rand, count uint64, cfg config) []op {
g := newGenerator(rng)

generators := []func(){
batchAbort: g.batchAbort,
batchCommit: g.batchCommit,
dbCheckpoint: g.dbCheckpoint,
dbCompact: g.dbCompact,
dbFlush: g.dbFlush,
dbRestart: g.dbRestart,
iterClose: g.iterClose,
iterFirst: g.iterFirst,
iterLast: g.iterLast,
iterNext: g.iterNext,
iterPrev: g.iterPrev,
iterSeekGE: g.iterSeekGE,
iterSeekLT: g.iterSeekLT,
iterSeekPrefixGE: g.iterSeekPrefixGE,
iterSetBounds: g.iterSetBounds,
newBatch: g.newBatch,
newIndexedBatch: g.newIndexedBatch,
newIter: g.newIter,
newIterUsingClone: g.newIterUsingClone,
newSnapshot: g.newSnapshot,
readerGet: g.readerGet,
snapshotClose: g.snapshotClose,
writerApply: g.writerApply,
writerDelete: g.writerDelete,
writerDeleteRange: g.writerDeleteRange,
writerIngest: g.writerIngest,
writerMerge: g.writerMerge,
writerSet: g.writerSet,
batchAbort: g.batchAbort,
batchCommit: g.batchCommit,
dbCheckpoint: g.dbCheckpoint,
dbCompact: g.dbCompact,
dbFlush: g.dbFlush,
dbRestart: g.dbRestart,
iterClose: g.iterClose,
iterFirst: g.iterFirst,
iterLast: g.iterLast,
iterNext: g.iterNext,
iterPrev: g.iterPrev,
iterSeekGE: g.iterSeekGE,
iterSeekLT: g.iterSeekLT,
iterSeekPrefixGE: g.iterSeekPrefixGE,
iterSetBounds: g.iterSetBounds,
newBatch: g.newBatch,
newIndexedBatch: g.newIndexedBatch,
newIter: g.newIter,
newIterUsingClone: g.newIterUsingClone,
newSnapshot: g.newSnapshot,
readerGet: g.readerGet,
snapshotClose: g.snapshotClose,
writerApply: g.writerApply,
writerDelete: g.writerDelete,
writerDeleteRange: g.writerDeleteRange,
writerIngest: g.writerIngest,
writerMerge: g.writerMerge,
writerSet: g.writerSet,
writerSingleDelete: g.writerSingleDelete,
}

// TPCC-style deck of cards randomization. Every time the end of the deck is
@@ -139,6 +150,36 @@ func (g *generator) randKey(newKey float64) []byte {
return key
}

func (g *generator) randKeyForWrite(newKey float64, singleSetKey float64, writerID objID) []byte {
if n := len(g.keys); n > 0 && g.rng.Float64() > newKey {
return g.keys[g.rng.Intn(n)]
}
key := g.randValue(4, 12)
if g.rng.Float64() < singleSetKey {
for {
if _, ok := g.generatedWriteKeys[string(key)]; ok {
key = g.randValue(4, 12)
continue
}
g.generatedWriteKeys[string(key)] = struct{}{}
g.writerToSingleSetKeys[writerID].addKey(key)
break
}
} else {
g.generatedWriteKeys[string(key)] = struct{}{}
g.keys = append(g.keys, key)
}
return key
}

func (g *generator) randKeyToSingleDelete() []byte {
length := len(*g.singleSetKeysInDB.keys)
if length == 0 {
return nil
}
return g.singleSetKeysInDB.removeKey(g.rng.Intn(length))
}

// TODO(peter): make the value size configurable. See valueSizeDist in
// config.go.
func (g *generator) randValue(min, max int) []byte {
Expand Down Expand Up @@ -177,6 +218,7 @@ func (g *generator) newBatch() {
g.init.batchSlots++
g.liveBatches = append(g.liveBatches, batchID)
g.liveWriters = append(g.liveWriters, batchID)
g.writerToSingleSetKeys[batchID] = makeSingleSetKeysForBatch()

g.add(&newBatchOp{
batchID: batchID,
@@ -193,6 +235,7 @@ func (g *generator) newIndexedBatch() {
iters := make(objIDSet)
g.batches[batchID] = iters
g.readers[batchID] = iters
g.writerToSingleSetKeys[batchID] = makeSingleSetKeysForBatch()

g.add(&newIndexedBatchOp{
batchID: batchID,
@@ -214,6 +257,7 @@ func (g *generator) batchClose(batchID objID) {
delete(g.iters, id)
g.add(&closeOp{objID: id})
}
delete(g.writerToSingleSetKeys, batchID)
}

func (g *generator) batchAbort() {
@@ -233,6 +277,7 @@ func (g *generator) batchCommit() {
}

batchID := g.liveBatches.rand(g.rng)
g.writerToSingleSetKeys[batchID].transferTo(g.singleSetKeysInDB)
g.batchClose(batchID)

g.add(&batchCommitOp{
@@ -630,6 +675,7 @@ func (g *generator) writerApply() {
}
}

g.writerToSingleSetKeys[batchID].transferTo(g.writerToSingleSetKeys[writerID])
g.batchClose(batchID)

g.add(&applyOp{
@@ -680,6 +726,9 @@ func (g *generator) writerIngest() {
batchIDs := make([]objID, 0, 1+g.rng.Intn(3))
for i := 0; i < cap(batchIDs); i++ {
batchID := g.liveBatches.rand(g.rng)
// After the ingest runs, it either succeeds and the keys are in the
// DB, or it fails and these keys never make it to the DB.
g.writerToSingleSetKeys[batchID].transferTo(g.singleSetKeysInDB)
g.batchClose(batchID)
batchIDs = append(batchIDs, batchID)
if len(g.liveBatches) == 0 {
@@ -700,8 +749,9 @@ func (g *generator) writerMerge() {
writerID := g.liveWriters.rand(g.rng)
g.add(&mergeOp{
writerID: writerID,
key: g.randKey(0.2), // 20% new keys
value: g.randValue(0, 20),
// 20% new keys, and none are set once.
key: g.randKeyForWrite(0.2, 0, writerID),
value: g.randValue(0, 20),
})
g.tryRepositionBatchIters(writerID)
}
@@ -714,8 +764,27 @@ func (g *generator) writerSet() {
writerID := g.liveWriters.rand(g.rng)
g.add(&setOp{
writerID: writerID,
key: g.randKey(0.5), // 50% new keys
value: g.randValue(0, 20),
// 50% new keys, and of those 50%, half are keys that will be set
// once.
key: g.randKeyForWrite(0.5, 0.5, writerID),
value: g.randValue(0, 20),
})
g.tryRepositionBatchIters(writerID)
}

func (g *generator) writerSingleDelete() {
if len(g.liveWriters) == 0 {
return
}

writerID := g.liveWriters.rand(g.rng)
key := g.randKeyToSingleDelete()
if key == nil {
return
}
g.add(&singleDeleteOp{
writerID: writerID,
key: key,
})
g.tryRepositionBatchIters(writerID)
}
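To make the new weights concrete: writerSet calls randKeyForWrite(0.5, 0.5, ...), so once some keys exist a Set picks a brand-new key about half the time, and half of those new keys are reserved as set-once keys, meaning roughly a quarter of Set operations feed the SingleDelete candidate pool. A quick standalone check of that arithmetic (an illustration, not part of the commit):

// Back-of-the-envelope simulation of randKeyForWrite(0.5, 0.5, ...):
// P(new key) = 0.5 and P(set-once | new key) = 0.5, so about 25% of Set
// calls should produce a single-delete candidate.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	rng := rand.New(rand.NewSource(1))
	const trials = 1_000_000
	candidates := 0
	for i := 0; i < trials; i++ {
		if rng.Float64() < 0.5 && rng.Float64() < 0.5 { // new key, then tagged set-once
			candidates++
		}
	}
	fmt.Printf("single-delete candidates: %.3f of Set calls\n",
		float64(candidates)/float64(trials))
}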
16 changes: 16 additions & 0 deletions internal/metamorphic/ops.go
@@ -130,6 +130,22 @@ func (o *deleteOp) String() string {
return fmt.Sprintf("%s.Delete(%q)", o.writerID, o.key)
}

// singleDeleteOp models a Write.SingleDelete operation.
type singleDeleteOp struct {
writerID objID
key []byte
}

func (o *singleDeleteOp) run(t *test, h *history) {
w := t.getWriter(o.writerID)
err := w.SingleDelete(o.key, t.writeOpts)
h.Recordf("%s // %v", o, err)
}

func (o *singleDeleteOp) String() string {
return fmt.Sprintf("%s.SingleDelete(%q)", o.writerID, o.key)
}

// deleteRangeOp models a Write.DeleteRange operation.
type deleteRangeOp struct {
writerID objID
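Each op's String form is what ends up in the recorded history via h.Recordf above, and the parser.go changes below register SingleDelete so those lines can be parsed back into ops when a run is replayed. A tiny reproduction of the line format (the "batch2" rendering of the objID is an assumption, not taken from this diff):

// Tiny reproduction (an assumption about how objIDs print; not the harness
// itself) of the history line format emitted by singleDeleteOp.
package main

import "fmt"

func main() {
	writerID := "batch2" // assumed objID rendering, e.g. "db" or "batch2"
	key := []byte("foo")
	op := fmt.Sprintf("%s.SingleDelete(%q)", writerID, key)
	var err error // nil: the operation succeeded
	fmt.Printf("%s // %v\n", op, err)
	// Output: batch2.SingleDelete("foo") // <nil>
}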
3 changes: 3 additions & 0 deletions internal/metamorphic/parser.go
@@ -96,6 +96,8 @@ func opArgs(op op) (receiverID *objID, targetID *objID, args []interface{}) {
return &t.writerID, nil, []interface{}{&t.key, &t.value}
case *iterSetBoundsOp:
return &t.iterID, nil, []interface{}{&t.lower, &t.upper}
case *singleDeleteOp:
return &t.writerID, nil, []interface{}{&t.key}
}
panic(fmt.Sprintf("unsupported op type: %T", op))
}
@@ -128,6 +130,7 @@ var methods = map[string]*methodInfo{
"SeekPrefixGE": makeMethod(iterSeekPrefixGEOp{}, iterTag),
"Set": makeMethod(setOp{}, dbTag, batchTag),
"SetBounds": makeMethod(iterSetBoundsOp{}, iterTag),
"SingleDelete": makeMethod(singleDeleteOp{}, dbTag, batchTag),
}

type parser struct {
30 changes: 30 additions & 0 deletions internal/metamorphic/utils.go
@@ -94,6 +94,36 @@ func (s objIDSet) sorted() []objID {
return keys
}

// singleSetKeysForBatch tracks keys that have been set once, and hence can be
// single deleted.
type singleSetKeysForBatch struct {
keys *[][]byte
}

func makeSingleSetKeysForBatch() singleSetKeysForBatch {
var keys [][]byte
return singleSetKeysForBatch{keys: &keys}
}

// addKey is called with a key that is set once in this batch.
func (s singleSetKeysForBatch) addKey(key []byte) {
*s.keys = append(*s.keys, key)
}

// transferTo transfers all the single-set keys to a different batch/db.
func (s singleSetKeysForBatch) transferTo(to singleSetKeysForBatch) {
*to.keys = append(*to.keys, *s.keys...)
}

// removeKey removes a key that will be single deleted.
func (s singleSetKeysForBatch) removeKey(index int) []byte {
key := (*s.keys)[index]
length := len(*s.keys)
(*s.keys)[length-1], (*s.keys)[index] = (*s.keys)[index], (*s.keys)[length-1]
*s.keys = (*s.keys)[:length-1]
return key
}

// firstError returns the first non-nil error of err0 and err1, or nil if both
// are nil.
func firstError(err0, err1 error) error {
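Two details of singleSetKeysForBatch are worth calling out: keys is a pointer to a slice so that the value-receiver methods, and every map entry holding a copy of the struct, mutate one shared list; and removeKey deletes in O(1) by swapping the chosen key with the last element before truncating, which is fine because callers pick indexes at random. A self-contained sketch of the same pattern, illustrative rather than the package code itself:

// Standalone illustration of the shared-slice and swap-remove pattern used by
// singleSetKeysForBatch (an illustration, not the package code).
package main

import "fmt"

type keyList struct {
	keys *[][]byte // pointer so copies of keyList share the same backing slice
}

func newKeyList() keyList {
	var keys [][]byte
	return keyList{keys: &keys}
}

func (l keyList) add(key []byte) { *l.keys = append(*l.keys, key) }

// transferTo appends all keys to another list, e.g. moving a committed
// batch's set-once keys into the DB-wide candidate list.
func (l keyList) transferTo(to keyList) { *to.keys = append(*to.keys, *l.keys...) }

// remove returns the key at index in O(1): swap it with the last element,
// then shrink the slice by one. Order is not preserved.
func (l keyList) remove(index int) []byte {
	key := (*l.keys)[index]
	last := len(*l.keys) - 1
	(*l.keys)[index], (*l.keys)[last] = (*l.keys)[last], (*l.keys)[index]
	*l.keys = (*l.keys)[:last]
	return key
}

func main() {
	batch, db := newKeyList(), newKeyList()
	batch.add([]byte("a"))
	batch.add([]byte("b"))
	batch.transferTo(db) // commit: batch's set-once keys become DB candidates
	fmt.Printf("%s\n", db.remove(0)) // "a"; db now holds just "b"
	fmt.Println(len(*db.keys))       // 1
}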
