Skip to content
This repository has been archived by the owner on Feb 6, 2023. It is now read-only.

Add optimizers benchmarks #30

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 58 additions & 0 deletions benchmarks/optimizers/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import six

import chainer
import cupy

from benchmarks import BenchmarkBase


class Link(chainer.Link):

    """A minimal model holding a single parameter for optimizer benchmarks."""

    def __init__(self, param):
        super(Link, self).__init__()
        with self.init_scope():
            # Register the array as a trainable parameter so optimizers
            # have something to update.
            self.p = chainer.Parameter(param)

    def __call__(self, x):
        # Elementwise scale by the parameter; gradients flow into `p`.
        return self.p * x


class OptimizerBenchmark(BenchmarkBase):

    """The base class for benchmark of optimizers."""

    # Call `test_*` methods only once as `backward()` has a side-effect.
    number = 1

    # Repeat the test for 10 times instead of 3 (`timeit.default_repeat`).
    repeat = 10

    def setup_benchmark(self, optimizer, batch_size, unit_num, dtype):
        """Performs setup of benchmark for optimizers.

        Call this in `setup` method of your benchmark class.
        Note that this function performs forward computation.

        Args:
            optimizer: Optimizer instance to benchmark; stored on `self`.
            batch_size (int): Number of rows of the random input batch.
            unit_num (int): Number of units (columns) of the input and the
                size of the model parameter.
            dtype: dtype of the input and the parameter.
        """

        xp = self.xp
        self.optimizer = optimizer

        x = xp.random.uniform(-1, 1, (batch_size, unit_num)).astype(dtype)
        param = xp.random.uniform(-1, 1, unit_num).astype(dtype)
        model = Link(param)
        if xp is cupy:
            model.to_gpu()

        x = chainer.Variable(x)
        y = model(x)
        # `y` is non-scalar, so its gradient must be seeded explicitly before
        # `backward()`.  Seed with ones rather than `zerograd()`: zeroing the
        # output gradient propagates all-zero gradients to the parameter, so
        # the optimizers would be benchmarked updating with zero grads.
        y.grad = xp.ones_like(y.data)
        y.backward()
        optimizer.setup(model)
        optimizer.update()

    def update(self, n_times):
        """Runs optimizer.update() for `n_times` iterations."""

        optimizer = self.optimizer

        for i in six.moves.range(n_times):
            optimizer.update()
21 changes: 21 additions & 0 deletions benchmarks/optimizers/ada_delta.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class AdaDelta(OptimizerBenchmark):

    """Benchmark for the AdaDelta optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.AdaDelta(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/ada_grad.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class AdaGrad(OptimizerBenchmark):

    """Benchmark for the AdaGrad optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.AdaGrad(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
23 changes: 23 additions & 0 deletions benchmarks/optimizers/adam.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize(
    [('dtype', [numpy.float32, numpy.float64]),
     ('amsgrad', [True, False])])
class Adam(OptimizerBenchmark):

    """Benchmark for the Adam optimizer, with and without AMSGrad."""

    def setup(self, dtype, amsgrad):
        # alpha=0.05; 32 samples of 100k units, matching sibling benchmarks.
        opt = optimizers.Adam(0.05, amsgrad=amsgrad)
        self.setup_benchmark(opt, 32, 100000, dtype)

    def time_update(self, dtype, amsgrad):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/corrected_momentum_sgd.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class CorrectedMomentumSGD(OptimizerBenchmark):

    """Benchmark for the CorrectedMomentumSGD optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(
            optimizers.CorrectedMomentumSGD(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/momentum_sgd.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class MomentumSGD(OptimizerBenchmark):

    """Benchmark for the MomentumSGD optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.MomentumSGD(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/msvag.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class MSVAG(OptimizerBenchmark):

    """Benchmark for the M-SVAG optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.MSVAG(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/nesterov_ag.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class NesterovAG(OptimizerBenchmark):

    """Benchmark for the NesterovAG optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.NesterovAG(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/rmsprop.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class RMSprop(OptimizerBenchmark):

    """Benchmark for the RMSprop optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.RMSprop(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/rmsprop_graves.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class RMSpropGraves(OptimizerBenchmark):

    """Benchmark for the RMSpropGraves optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.RMSpropGraves(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/sgd.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class SGD(OptimizerBenchmark):

    """Benchmark for the vanilla SGD optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.SGD(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)
21 changes: 21 additions & 0 deletions benchmarks/optimizers/smorms3.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import numpy

from chainer import optimizers

from benchmarks.optimizers import OptimizerBenchmark
from benchmarks.utils import backends
from benchmarks.utils import parameterize


@backends('gpu', 'cpu')
@parameterize([('dtype', [numpy.float32, numpy.float64])])
class SMORMS3(OptimizerBenchmark):

    """Benchmark for the SMORMS3 optimizer."""

    def setup(self, dtype):
        # 32 samples of 100k units each, matching the sibling benchmarks.
        self.setup_benchmark(optimizers.SMORMS3(), 32, 100000, dtype)

    def time_update(self, dtype):
        self.update(1000)