Skip to content

Commit

Permalink
restore merton tests
Browse files Browse the repository at this point in the history
  • Loading branch information
rhandal-pfn committed Aug 29, 2024
1 parent 686f40f commit e35d351
Show file tree
Hide file tree
Showing 2 changed files with 324 additions and 0 deletions.
41 changes: 41 additions & 0 deletions tests/instruments/primary/test_merton_jump.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import pytest
import torch

from pfhedge.instruments import MertonJumpStock


class TestMertonJumpStock:
    """Tests for the ``MertonJumpStock`` primary instrument.

    Covers NaN-freeness of simulated variance, ``repr`` formatting,
    and the number of time steps produced by ``simulate``.
    """

    @pytest.mark.parametrize("seed", range(1))
    def test_values_are_finite(self, seed, device: str = "cpu"):
        # Simulated variance should never contain NaN for any seed/device.
        torch.manual_seed(seed)

        s = MertonJumpStock().to(device)
        s.simulate(n_paths=1000)

        assert not s.variance.isnan().any()

    @pytest.mark.gpu
    @pytest.mark.parametrize("seed", range(1))
    def test_values_are_finite_gpu(self, seed):
        # Same check on CUDA; gated behind the "gpu" marker.
        self.test_values_are_finite(seed, device="cuda")

    def test_repr(self):
        # The expected string is split with a backslash continuation:
        # the literal itself contains no newline.
        s = MertonJumpStock(cost=1e-4)
        expect = "MertonJumpStock(\
mu=0., sigma=0.2000, jump_per_year=68, jump_mean=0., jump_std=0.0100, cost=1.0000e-04, dt=0.0040)"
        assert repr(s) == expect

    def test_simulate_shape(self, device: str = "cpu"):
        # time_horizon=0.2 with dt=0.1 -> 3 time points (both endpoints
        # included); 0.25 rounds up to 4 points.
        s = MertonJumpStock(dt=0.1).to(device)
        s.simulate(time_horizon=0.2, n_paths=10)
        assert s.spot.size() == torch.Size((10, 3))
        assert s.variance.size() == torch.Size((10, 3))

        s = MertonJumpStock(dt=0.1).to(device)
        s.simulate(time_horizon=0.25, n_paths=10)
        assert s.spot.size() == torch.Size((10, 4))
        assert s.variance.size() == torch.Size((10, 4))

    @pytest.mark.gpu
    def test_simulate_shape_gpu(self):
        # Same shape check on CUDA; gated behind the "gpu" marker.
        self.test_simulate_shape(device="cuda")
283 changes: 283 additions & 0 deletions tests/stochastic/test_merton_jump.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,283 @@
from math import sqrt

import pytest
import torch
from torch.testing import assert_close

from pfhedge.stochastic import generate_merton_jump
from pfhedge.stochastic.engine import RandnSobolBoxMuller


def test_generate_brownian_mean_no_jump(device: str = "cpu"):
    """With zero jump size the terminal spot mean stays at the init state."""
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    spot = generate_merton_jump(
        n_paths, n_steps, jump_std=0.0, device=torch.device(device)
    )
    assert spot.size() == torch.Size((n_paths, n_steps))
    mean = spot[:, -1].mean()
    target = torch.ones_like(mean)
    # Three Monte Carlo standard errors at sigma = 0.2.
    tol = 3 * (0.2 * sqrt(1 / n_paths))
    assert_close(mean, target, atol=tol, rtol=0)


@pytest.mark.gpu
def test_generate_brownian_mean_no_jump_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_brownian_mean_no_jump(device="cuda")


def test_generate_brownian_mean_no_jump1(device: str = "cpu"):
    """With zero jump intensity the terminal spot mean stays at the init state.

    ``jump_per_year=0`` means no jumps occur, so the process reduces to
    drift-free geometric Brownian motion whose expected terminal value is 1.
    """
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    # Wrap the device string in torch.device for consistency with the
    # sibling tests in this file (behavior is identical either way).
    output = generate_merton_jump(
        n_paths, n_steps, jump_per_year=0.0, device=torch.device(device)
    )
    assert output.size() == torch.Size((n_paths, n_steps))
    result = output[:, -1].mean()
    expect = torch.ones_like(result)
    # Three Monte Carlo standard errors at sigma = 0.2.
    std = 0.2 * sqrt(1 / n_paths)
    assert_close(result, expect, atol=3 * std, rtol=0)


@pytest.mark.gpu
def test_generate_brownian_mean_no_jump1_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_brownian_mean_no_jump1(device="cuda")


def test_generate_brownian_mean_no_jump_std(device: str = "cpu"):
    """Deterministic jump sizes (jump_std=0) keep the mean near one.

    Jumps occur at the default intensity but each has a fixed size
    (mean 0.1, std 0); the compensated drift keeps E[S_T] = 1.
    """
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    spot = generate_merton_jump(
        n_paths,
        n_steps,
        jump_per_year=68.2,  # default value
        jump_std=0.0,
        jump_mean=0.1,
        device=torch.device(device),
    )
    assert spot.size() == torch.Size((n_paths, n_steps))
    mean = spot[:, -1].mean()
    target = torch.ones_like(mean)
    # Wider tolerance (0.4) to absorb the jump contribution.
    tol = 3 * (0.4 * sqrt(1 / n_paths))
    assert_close(mean, target, atol=tol, rtol=0)


@pytest.mark.gpu
def test_generate_brownian_mean_no_jump_std_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_brownian_mean_no_jump_std(device="cuda")


def test_generate_brownian_mean(device: str = "cpu"):
    """Terminal spot mean stays near one with a low jump intensity."""
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    spot = generate_merton_jump(
        n_paths, n_steps, jump_per_year=1, device=torch.device(device)
    )
    assert spot.size() == torch.Size((n_paths, n_steps))
    mean = spot[:, -1].mean()
    target = torch.ones_like(mean)
    # Diffusion and jump contributions to the standard error, combined.
    tol = 3 * (0.2 * sqrt(1 / n_paths) + 0.3 * sqrt(1 / n_paths))
    assert_close(mean, target, atol=tol, rtol=0)


@pytest.mark.gpu
def test_generate_brownian_mean_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_brownian_mean(device="cuda")


def test_generate_merton_jump_nosigma(device: str = "cpu"):
    """With no diffusion and no jumps the path is deterministic."""
    torch.manual_seed(42)
    n_steps = 250

    # sigma=0 and jump_per_year=0: the spot never moves from 1.
    flat = generate_merton_jump(
        1, n_steps, sigma=0, jump_per_year=0, device=torch.device(device)
    )
    assert_close(flat, torch.ones(1, n_steps).to(device))

    # With drift only, the log-spot grows linearly: log S_k = mu * dt * k.
    mu = 0.1
    dt = 0.01
    logspot = generate_merton_jump(
        1, n_steps, mu=mu, sigma=0, dt=dt, jump_per_year=0, device=torch.device(device)
    ).log()
    ramp = torch.linspace(0, mu * dt * (n_steps - 1), n_steps).unsqueeze(0).to(device)
    assert_close(logspot, ramp)


@pytest.mark.gpu
def test_generate_merton_jump_nosigma_gpu():
    # Bug fix: this wrapper previously passed device="cpu", so the CUDA
    # branch was never exercised despite the gpu marker. Every other
    # *_gpu wrapper in this file passes "cuda".
    test_generate_merton_jump_nosigma(device="cuda")


def test_generate_merton_jump_nosigma2(device: str = "cpu"):
    """With no diffusion and zero-width jumps the path is deterministic."""
    torch.manual_seed(42)
    n_steps = 250

    # sigma=0 and jump_std=0 (default jump_mean=0): the spot never moves.
    flat = generate_merton_jump(
        1, n_steps, sigma=0, jump_std=0, device=torch.device(device)
    )
    assert_close(flat, torch.ones(1, n_steps).to(device))

    # With drift only, the log-spot grows linearly: log S_k = mu * dt * k.
    mu = 0.1
    dt = 0.01
    logspot = generate_merton_jump(
        1, n_steps, mu=mu, sigma=0, dt=dt, jump_std=0, device=torch.device(device)
    ).log()
    ramp = torch.linspace(0, mu * dt * (n_steps - 1), n_steps).unsqueeze(0).to(device)
    assert_close(logspot, ramp)


@pytest.mark.gpu
def test_generate_merton_jump_nosigma2_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_nosigma2(device="cuda")


def test_generate_merton_jump_std(device: str = "cpu"):
    """Without jumps the terminal log-spot std matches sigma (0.2)."""
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    spot = generate_merton_jump(
        n_paths, n_steps, jump_per_year=0, device=torch.device(device)
    )
    assert spot.size() == torch.Size((n_paths, n_steps))
    sample_std = spot[:, -1].log().std()
    target = torch.full_like(sample_std, 0.2)
    # 10% relative tolerance for the Monte Carlo estimate.
    assert_close(sample_std, target, atol=0, rtol=0.1)


@pytest.mark.gpu
def test_generate_merton_jump_std_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_std(device="cuda")


def test_generate_merton_jump_std2(device: str = "cpu"):
    """With zero-width jumps the terminal log-spot std matches sigma (0.2)."""
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    spot = generate_merton_jump(
        n_paths, n_steps, jump_std=0, device=torch.device(device)
    )
    assert spot.size() == torch.Size((n_paths, n_steps))
    sample_std = spot[:, -1].log().std()
    target = torch.full_like(sample_std, 0.2)
    # 10% relative tolerance for the Monte Carlo estimate.
    assert_close(sample_std, target, atol=0, rtol=0.1)


@pytest.mark.gpu
def test_generate_merton_jump_std2_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_std2(device="cuda")


def test_generate_merton_jump_mean_init_state(device: str = "cpu"):
    """init_state may be a float, a 0-d tensor, or a 1-element tensor.

    All three spellings of an initial value of 1.0 must produce paths
    whose terminal mean is one (no jumps, zero drift).
    """
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250

    # Same three calls as before, in the same order, so the RNG stream
    # consumed by each call is unchanged.
    for init_state in (1.0, torch.tensor(1.0), torch.tensor([1.0])):
        output = generate_merton_jump(
            n_paths,
            n_steps,
            init_state=init_state,
            jump_per_year=0,
            device=torch.device(device),
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        # Three Monte Carlo standard errors at sigma = 0.2.
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)


@pytest.mark.gpu
def test_generate_merton_jump_mean_init_state_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_mean_init_state(device="cuda")


def test_generate_merton_jump_mean_mu(device: str = "cpu"):
    """A positive drift shifts the log of the terminal mean to mu * T."""
    torch.manual_seed(42)
    n_paths = 10000
    n_steps = 250
    dt = 1 / 250
    mu = 0.1

    spot = generate_merton_jump(
        n_paths, n_steps, mu=mu, jump_per_year=0, device=torch.device(device)
    )
    log_mean = spot[:, -1].mean().log()
    # NOTE(review): this uses n_steps, while the nosigma test uses
    # (n_steps - 1); the 3-sigma tolerance absorbs the one-step gap.
    target = torch.full_like(log_mean, mu * dt * n_steps).to(device)
    tol = 3 * (0.2 * sqrt(1 / n_paths))
    assert_close(log_mean, target, atol=tol, rtol=0)


@pytest.mark.gpu
def test_generate_merton_jump_mean_mu_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_mean_mu(device="cuda")


def test_generate_merton_jump_dtype(device: str = "cpu"):
    """The dtype argument is propagated to the returned tensor."""
    torch.manual_seed(42)

    for dtype in (torch.float32, torch.float64):
        out = generate_merton_jump(1, 1, dtype=dtype, device=torch.device(device))
        assert out.dtype == dtype


@pytest.mark.gpu
def test_generate_merton_jump_dtype_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_dtype(device="cuda")


def test_generate_merton_jump_sobol_mean(device: str = "cpu"):
    """A Sobol quasi-random engine also yields a terminal mean near one."""
    n_paths = 10000
    n_steps = 250

    engine = RandnSobolBoxMuller(seed=42, scramble=True)
    spot = generate_merton_jump(
        n_paths, n_steps, engine=engine, jump_per_year=0, device=torch.device(device)
    )
    assert spot.size() == torch.Size((n_paths, n_steps))
    mean = spot[:, -1].mean()
    target = torch.ones_like(mean).to(device)
    # Looser 10-sigma band: quasi-random error bounds differ from iid MC.
    tol = 10 * (0.2 * sqrt(1 / n_paths))
    assert_close(mean, target, atol=tol, rtol=0)


@pytest.mark.gpu
def test_generate_merton_jump_sobol_mean_gpu():
    # Re-run the CPU test on CUDA; gated behind the "gpu" marker.
    test_generate_merton_jump_sobol_mean(device="cuda")

0 comments on commit e35d351

Please sign in to comment.