Implement v1 in terms of v2
Remove all the core functionality of v1 and instead implement it
in terms of v2 with the appropriate options specified.
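
For illustration only, a minimal sketch of a v1 entry point delegating to the v2 implementation with a bundle of emulation options. The marshalOptionsV1 helper and the particular options chosen below are assumptions for the sketch, not necessarily what this commit wires up:

package json

import (
    jsonv2 "github.com/go-json-experiment/json"
    "github.com/go-json-experiment/json/jsontext"
)

// marshalOptionsV1 bundles v2 options that approximate historical v1
// marshaling behavior. The exact set here is illustrative.
func marshalOptionsV1() jsonv2.Options {
    return jsonv2.JoinOptions(
        jsonv2.Deterministic(true),         // v1 sorts map keys
        jsonv2.FormatNilMapAsNull(true),    // v1 encodes nil maps as null
        jsonv2.FormatNilSliceAsNull(true),  // v1 encodes nil slices as null
        jsontext.AllowDuplicateNames(true), // v1 never rejected duplicate names
        jsontext.AllowInvalidUTF8(true),    // v1 replaced invalid UTF-8 instead of erroring
    )
}

// Marshal keeps the v1 signature but forwards to the v2 implementation.
func Marshal(v any) ([]byte, error) {
    return jsonv2.Marshal(v, marshalOptionsV1())
}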

Many v1 test cases currently fail because the v1 emulation in v2
is not a sufficient reproduction of historical v1 behavior.

The list of known failures is stored in v1/failing.txt,
which allows such failing tests to be skipped.

As the v1 emulation in v2 improves, the failing.txt list
should eventually become empty.
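
As a sketch of how failing.txt could be consumed, a test helper might skip any test whose name is listed there. The skipIfKnownFailure helper and its one-test-name-per-line matching rule are assumptions, not taken from this commit:

package json

import (
    _ "embed"
    "strings"
    "testing"
)

//go:embed failing.txt
var knownFailures string

// skipIfKnownFailure skips the test if its full name appears in failing.txt.
func skipIfKnownFailure(t *testing.T) {
    t.Helper()
    for _, name := range strings.Split(knownFailures, "\n") {
        if strings.TrimSpace(name) == t.Name() {
            t.Skip("known failure of the v1 emulation in v2; see failing.txt")
        }
    }
}
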
dsnet committed Dec 16, 2024
1 parent a8b2846 commit 5704ce8
Showing 20 changed files with 533 additions and 3,920 deletions.
92 changes: 0 additions & 92 deletions v1/bench_test.go
@@ -12,13 +12,8 @@ package json

import (
"bytes"
"fmt"
"io"
"reflect"
"regexp"
"runtime"
"strings"
"sync"
"testing"

"github.com/go-json-experiment/json/internal/jsontest"
@@ -451,67 +446,6 @@ func BenchmarkUnmapped(b *testing.B) {
    })
}

func BenchmarkTypeFieldsCache(b *testing.B) {
    b.ReportAllocs()
    var maxTypes int = 1e6

    // Dynamically generate many new types.
    types := make([]reflect.Type, maxTypes)
    fs := []reflect.StructField{{
        Type:  reflect.TypeFor[string](),
        Index: []int{0},
    }}
    for i := range types {
        fs[0].Name = fmt.Sprintf("TypeFieldsCache%d", i)
        types[i] = reflect.StructOf(fs)
    }

    // clearCache clears the cache. Other JSON operations must not be running.
    clearCache := func() {
        fieldCache = sync.Map{}
    }

    // MissTypes tests the performance of repeated cache misses.
    // This measures the time to rebuild a cache of size nt.
    for nt := 1; nt <= maxTypes; nt *= 10 {
        ts := types[:nt]
        b.Run(fmt.Sprintf("MissTypes%d", nt), func(b *testing.B) {
            nc := runtime.GOMAXPROCS(0)
            for i := 0; i < b.N; i++ {
                clearCache()
                var wg sync.WaitGroup
                for j := 0; j < nc; j++ {
                    wg.Add(1)
                    go func(j int) {
                        for _, t := range ts[(j*len(ts))/nc : ((j+1)*len(ts))/nc] {
                            cachedTypeFields(t)
                        }
                        wg.Done()
                    }(j)
                }
                wg.Wait()
            }
        })
    }

    // HitTypes tests the performance of repeated cache hits.
    // This measures the average time of each cache lookup.
    for nt := 1; nt <= maxTypes; nt *= 10 {
        // Pre-warm a cache of size nt.
        clearCache()
        for _, t := range types[:nt] {
            cachedTypeFields(t)
        }
        b.Run(fmt.Sprintf("HitTypes%d", nt), func(b *testing.B) {
            b.RunParallel(func(pb *testing.PB) {
                for pb.Next() {
                    cachedTypeFields(types[0])
                }
            })
        })
    }
}

func BenchmarkEncodeMarshaler(b *testing.B) {
    b.ReportAllocs()

@@ -545,29 +479,3 @@ func BenchmarkEncoderEncode(b *testing.B) {
        }
    })
}

func BenchmarkNumberIsValid(b *testing.B) {
    s := "-61657.61667E+61673"
    for i := 0; i < b.N; i++ {
        isValidNumber(s)
    }
}

func BenchmarkNumberIsValidRegexp(b *testing.B) {
    var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
    s := "-61657.61667E+61673"
    for i := 0; i < b.N; i++ {
        jsonNumberRegexp.MatchString(s)
    }
}

func BenchmarkUnmarshalNumber(b *testing.B) {
    b.ReportAllocs()
    data := []byte(`"-61657.61667E+61673"`)
    var number Number
    for i := 0; i < b.N; i++ {
        if err := Unmarshal(data, &number); err != nil {
            b.Fatal("Unmarshal:", err)
        }
    }
}
