Commit 286829a (1 parent: d0def47). Showing 2 changed files with 369 additions and 0 deletions.
@@ -0,0 +1,59 @@
import pytest
import torch

from pfhedge.instruments import KouJumpStock
from pfhedge.instruments import MertonJumpStock


class TestJumpStock:
    cls = MertonJumpStock  # Defaults to MertonJumpStock

    def setup_method(self):
        self.jump_test_class = self.cls

    @pytest.mark.parametrize("seed", range(1))
    def test_values_are_finite(self, seed, device: str = "cpu"):
        torch.manual_seed(seed)

        s = self.jump_test_class().to(device)
        s.simulate(n_paths=1000)

        assert not s.variance.isnan().any()

    @pytest.mark.gpu
    @pytest.mark.parametrize("seed", range(1))
    def test_values_are_finite_gpu(self, seed):
        self.test_values_are_finite(seed, device="cuda")

    def test_repr(self):
        s = self.jump_test_class(cost=1e-4)
        # default for merton model
        expect = "MertonJumpStock(\
mu=0., sigma=0.2000, jump_per_year=68, jump_mean=0., jump_std=0.0100, cost=1.0000e-04, dt=0.0040)"
        if self.jump_test_class == KouJumpStock:
            expect = "KouJumpStock(\
sigma=0.2000, cost=1.0000e-04, dt=0.0040, jump_per_year=68., jump_mean_up=0.0200, jump_mean_down=0.0500, jump_up_prob=0.5000)"
        assert repr(s) == expect

    def test_simulate_shape(self, device: str = "cpu"):
        s = self.jump_test_class(dt=0.1).to(device)
        s.simulate(time_horizon=0.2, n_paths=10)
        assert s.spot.size() == torch.Size((10, 3))
        assert s.variance.size() == torch.Size((10, 3))

        s = self.jump_test_class(dt=0.1).to(device)
        s.simulate(time_horizon=0.25, n_paths=10)
        assert s.spot.size() == torch.Size((10, 4))
        assert s.variance.size() == torch.Size((10, 4))

    @pytest.mark.gpu
    def test_simulate_shape_gpu(self):
        self.test_simulate_shape(device="cuda")


class TestMertonJumpStock(TestJumpStock):
    pass  # default test checks merton's tests


class TestKouJumpStock(TestJumpStock):
    cls = KouJumpStock
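For reference, a minimal usage sketch of the instrument classes exercised above, assembled only from calls that appear in these tests (constructor arguments, simulate, spot, variance, and .to(device)); the parameter values are illustrative, not recommendations.

import torch

from pfhedge.instruments import KouJumpStock, MertonJumpStock

# Merton jump-diffusion stock with a transaction cost, simulated on 10 paths.
stock = MertonJumpStock(cost=1e-4, dt=0.1)
stock.simulate(time_horizon=0.2, n_paths=10)
print(stock.spot.size())             # torch.Size([10, 3]), as asserted in test_simulate_shape
print(stock.variance.isnan().any())  # tensor(False) is what test_values_are_finite expects

# Kou double-exponential jump stock; .to("cuda") would mirror the GPU-marked tests.
kou = KouJumpStock()
kou.simulate(n_paths=1000)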
@@ -0,0 +1,310 @@
from math import sqrt

import pytest
import torch
from torch.testing import assert_close

from pfhedge.stochastic import generate_kou_jump
from pfhedge.stochastic import generate_merton_jump
from pfhedge.stochastic.engine import RandnSobolBoxMuller


class TestGenerateJumpStock:
    func = staticmethod(generate_merton_jump)  # Defaults to generate_merton_jump

    def setup_method(self):
        self.jump_test_func = self.func
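
    # The mean tests below check that, with the drift mu at its default of zero
    # and jumps disabled or made degenerate, the terminal spot has sample mean
    # close to 1.  The bound atol=3 * std, with std = 0.2 * sqrt(1 / n_paths),
    # is roughly three standard errors of the sample mean under the default
    # sigma of 0.2.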
    def test_generate_brownian_mean_no_jump(self, device: str = "cpu"):
        # kou jump has no std
        if self.jump_test_func == (generate_kou_jump):
            return
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(
            n_paths, n_steps, jump_std=0.0, device=torch.device(device)
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_brownian_mean_no_jump_gpu(self):
        self.test_generate_brownian_mean_no_jump(device="cuda")

    def test_generate_brownian_mean_no_jump1(self, device: str = "cpu"):
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(n_paths, n_steps, jump_per_year=0.0, device=device)
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_brownian_mean_no_jump1_gpu(self):
        self.test_generate_brownian_mean_no_jump1(device="cuda")

    def test_generate_brownian_mean_no_jump_std(self, device: str = "cpu"):
        # kou jump has no std
        if self.jump_test_func == (generate_kou_jump):
            return
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(
            n_paths,
            n_steps,
            jump_per_year=68.2,  # default value
            jump_std=0.0,
            jump_mean=0.1,
            device=torch.device(device),
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.4 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_brownian_mean_no_jump_std_gpu(self):
        self.test_generate_brownian_mean_no_jump_std(device="cuda")

    def test_generate_brownian_mean(self, device: str = "cpu"):
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(
            n_paths, n_steps, jump_per_year=1, device=torch.device(device)
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.2 * sqrt(1 / n_paths) + 0.3 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_brownian_mean_gpu(self):
        self.test_generate_brownian_mean(device="cuda")

    def test_generate_jump_nosigma(self, device: str = "cpu"):
        torch.manual_seed(42)
        n_steps = 250

        result = self.jump_test_func(
            1, n_steps, sigma=0, jump_per_year=0, device=torch.device(device)
        )
        expect = torch.ones(1, n_steps).to(device)
        assert_close(result, expect)

        mu = 0.1
        dt = 0.01
        result = self.jump_test_func(
            1, n_steps, mu=mu, sigma=0, dt=dt, jump_per_year=0, device=torch.device(device)
        ).log()
        expect = torch.linspace(0, mu * dt * (n_steps - 1), n_steps).unsqueeze(0).to(device)
        assert_close(result, expect)
    @pytest.mark.gpu
    def test_generate_jump_nosigma_gpu(self):
        self.test_generate_jump_nosigma(device="cuda")
    def test_generate_jump_nosigma2(self, device: str = "cpu"):
        # kou jump has no std
        if self.jump_test_func == (generate_kou_jump):
            return
        torch.manual_seed(42)
        n_steps = 250

        result = self.jump_test_func(
            1, n_steps, sigma=0, jump_std=0, device=torch.device(device)
        )
        expect = torch.ones(1, n_steps).to(device)
        assert_close(result, expect)

        mu = 0.1
        dt = 0.01
        result = self.jump_test_func(
            1, n_steps, mu=mu, sigma=0, dt=dt, jump_std=0, device=torch.device(device)
        ).log()
        expect = torch.linspace(0, mu * dt * (n_steps - 1), n_steps).unsqueeze(0).to(device)
        assert_close(result, expect)

    @pytest.mark.gpu
    def test_generate_jump_nosigma2_gpu(self):
        self.test_generate_jump_nosigma2(device="cuda")

    def test_generate_jump_std(self, device: str = "cpu"):
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(
            n_paths, n_steps, jump_per_year=0, device=torch.device(device)
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].log().std()
        expect = torch.full_like(result, 0.2)
        assert_close(result, expect, atol=0, rtol=0.1)

    @pytest.mark.gpu
    def test_generate_jump_std_gpu(self):
        self.test_generate_jump_std(device="cuda")

    def test_generate_jump_std2(self, device: str = "cpu"):
        # kou jump has no std
        if self.jump_test_func == (generate_kou_jump):
            return
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(
            n_paths, n_steps, jump_std=0, device=torch.device(device)
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].log().std()
        expect = torch.full_like(result, 0.2)
        assert_close(result, expect, atol=0, rtol=0.1)

    @pytest.mark.gpu
    def test_generate_jump_std2_gpu(self):
        self.test_generate_jump_std2(device="cuda")

    def test_generate_jump_mean_init_state(self, device: str = "cpu"):
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250

        output = self.jump_test_func(
            n_paths, n_steps, init_state=1.0, jump_per_year=0, device=torch.device(device)
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

        output = self.jump_test_func(
            n_paths,
            n_steps,
            init_state=torch.tensor(1.0),
            jump_per_year=0,
            device=torch.device(device),
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

        output = self.jump_test_func(
            n_paths,
            n_steps,
            init_state=torch.tensor([1.0]),
            jump_per_year=0,
            device=torch.device(device),
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_jump_mean_init_state_gpu(self):
        self.test_generate_jump_mean_init_state(device="cuda")

    def test_generate_jump_mean_mu(self, device: str = "cpu"):
        torch.manual_seed(42)
        n_paths = 10000
        n_steps = 250
        dt = 1 / 250
        mu = 0.1

        output = self.jump_test_func(
            n_paths, n_steps, mu=mu, jump_per_year=0, device=torch.device(device)
        )
        result = output[:, -1].mean().log()
        expect = torch.full_like(result, mu * dt * n_steps).to(device)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=3 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_jump_mean_mu_gpu(self):
        self.test_generate_jump_mean_mu(device="cuda")

    def test_generate_jump_dtype(self, device: str = "cpu"):
        torch.manual_seed(42)

        output = self.jump_test_func(
            1, 1, dtype=torch.float32, device=torch.device(device)
        )
        assert output.dtype == torch.float32

        output = self.jump_test_func(
            1, 1, dtype=torch.float64, device=torch.device(device)
        )
        assert output.dtype == torch.float64

    @pytest.mark.gpu
    def test_generate_jump_dtype_gpu(self):
        self.test_generate_jump_dtype(device="cuda")
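
    # RandnSobolBoxMuller draws quasi-random standard normals (a scrambled Sobol
    # sequence pushed through the Box-Muller transform) and carries its own seed,
    # so no torch.manual_seed call is needed below.  The bound of ten standard
    # errors is looser than the three used elsewhere, presumably to leave
    # headroom for the quasi-random sampling.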
    def test_generate_jump_sobol_mean(self, device: str = "cpu"):
        n_paths = 10000
        n_steps = 250

        engine = RandnSobolBoxMuller(seed=42, scramble=True)
        output = self.jump_test_func(
            n_paths, n_steps, engine=engine, jump_per_year=0, device=torch.device(device)
        )
        assert output.size() == torch.Size((n_paths, n_steps))
        result = output[:, -1].mean()
        expect = torch.ones_like(result).to(device)
        std = 0.2 * sqrt(1 / n_paths)
        assert_close(result, expect, atol=10 * std, rtol=0)

    @pytest.mark.gpu
    def test_generate_jump_sobol_mean_gpu(self):
        self.test_generate_jump_sobol_mean(device="cuda")


class TestGenerateMertonJumpStock(TestGenerateJumpStock):
    pass


class TestGenerateKouJumpStock(TestGenerateJumpStock):
    func = staticmethod(generate_kou_jump)
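Similarly, a minimal sketch of the generator functions tested above, using only parameters that occur in these tests; jump_per_year=68.2 and jump_std=0.01 follow the defaults mentioned in the comments and the repr test, the other values are illustrative.

import torch

from pfhedge.stochastic import generate_kou_jump, generate_merton_jump
from pfhedge.stochastic.engine import RandnSobolBoxMuller

# Merton jump diffusion: returns an (n_paths, n_steps) tensor of spot paths.
spot = generate_merton_jump(
    10000,
    250,
    mu=0.1,
    sigma=0.2,
    jump_per_year=68.2,
    jump_mean=0.0,
    jump_std=0.01,
    init_state=torch.tensor([1.0]),
    dt=1 / 250,
    dtype=torch.float64,
    device=torch.device("cpu"),
)
print(spot.size())  # torch.Size([10000, 250])

# Kou double-exponential jumps; the tests above skip the jump_std cases for it.
spot_kou = generate_kou_jump(10000, 250, jump_per_year=68.0)

# Quasi-random normals from the Sobol/Box-Muller engine, as in the last test.
engine = RandnSobolBoxMuller(seed=42, scramble=True)
spot_qmc = generate_merton_jump(10000, 250, engine=engine)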