Merge pull request #60 from masanorihirano/release/0.1.6
Release/0.1.6
masanorihirano authored Jun 6, 2022
2 parents 7003f7e + 9591545 commit 5b749b0
Showing 4 changed files with 62 additions and 2 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pytorch_extra_mhirano"
version = "0.1.5"
version = "0.1.6"
description = ""
authors = ["Masanori HIRANO <[email protected]>"]
license = "MIT"
3 changes: 3 additions & 0 deletions pytorch_extra_mhirano/experimental/variance_decomposition.py
@@ -96,6 +96,9 @@ def update_param(
sample_coefficient: torch.Tensor,
inputs: torch.Tensor,
) -> None:
sample_intercept = sample_intercept.detach()
sample_coefficient = sample_coefficient.detach()
inputs = inputs.detach()
if self.zero_intercept:
Ai = sample_coefficient.reshape(self.params_dim_for_solver, 1)
else:
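The three detach() calls added above cut the tensors handed to update_param out of the autograd graph before they are used to refresh the module's internal solver state. A minimal sketch of the semantics, not part of this commit and using an assumed upstream layer: detach() returns a tensor that shares storage with the original but carries no gradient history, so nothing done with it can push gradients back into a preceding trainable layer.

import torch

# Sketch only (assumed upstream layer, not from this repository): detach()
# yields the same values with no grad_fn attached.
layer = torch.nn.Linear(3, 3)
x = torch.rand(4, 3)
y = layer(x)            # part of the autograd graph
detached = y.detach()   # same data, excluded from the graph

assert y.requires_grad and not detached.requires_grad

# Only the original tensor routes gradients back into `layer`; anything done
# with `detached` (e.g. internal bookkeeping updates) stays out of backward().
y.square().sum().backward()
assert layer.weight.grad is not None

This appears to be the scenario the new test_grad cases below exercise: gradients must still reach the upstream layer through the returned residual while update_param consumes detached copies.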
2 changes: 1 addition & 1 deletion pytorch_extra_mhirano/version.py
@@ -1 +1 @@
__version__ = "0.1.5"
__version__ = "0.1.6"
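Both the Poetry metadata and the in-package version string move from 0.1.5 to 0.1.6 in lockstep. A quick runtime check, assuming the package is installed from this revision:

from pytorch_extra_mhirano.version import __version__

assert __version__ == "0.1.6"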
57 changes: 57 additions & 0 deletions
@@ -180,3 +180,60 @@ def test2_gpu(
n_batch=n_batch,
device=torch.device("cuda"),
)

@pytest.mark.parametrize(
"size", [(4, None, 2), (8, 2, 3), (5, None, 3), (33, 5, 6)]
)
@pytest.mark.parametrize("zero_intercept", [True, False])
@pytest.mark.parametrize("n_batch", [2, 10, 100])
def test_grad(
self,
size: Tuple[int, Optional[int], int],
zero_intercept: bool,
n_batch: int,
device: torch.device = torch.device("cpu"),
) -> None:
batch_size, input_len, input_dim = size
vd = VarianceDecomposition(
inputs_dim=input_dim, inputs_len=input_len, zero_intercept=zero_intercept
)
assert vd.inputs_len == input_len
assert vd.inputs_dim == input_dim
vd.to(device)
layer1 = torch.nn.Linear(input_dim, input_dim).to(device)
optimizer = torch.optim.Adam(layer1.parameters())
torch.manual_seed(42)
inputs_all = []
targets_all = []
for _ in range(n_batch):
_size: List[int] = [x for x in size if x]
inputs = torch.rand(_size)
target = torch.rand([size[0], 1])
inputs_all.append(inputs)
targets_all.append(target)
for inputs, target in zip(inputs_all, targets_all):
inputs = inputs.to(device)
target = target.to(device)
inputs = layer1.forward(inputs)
res, pred = vd.forward(inputs=inputs, targets=target)
if res is None:
raise AssertionError
optimizer.zero_grad()
res.square().sum().backward()
optimizer.step()

@pytest.mark.gpu
@pytest.mark.parametrize(
"size", [(4, None, 2), (8, 2, 3), (5, None, 3), (33, 5, 6)]
)
@pytest.mark.parametrize("zero_intercept", [True, False])
@pytest.mark.parametrize("n_batch", [2, 10, 100, 1000])
def test_grad_gpu(
self, size: Tuple[int, Optional[int], int], zero_intercept: bool, n_batch: int
) -> None:
self.test_grad(
size=size,
zero_intercept=zero_intercept,
n_batch=n_batch,
device=torch.device("cuda"),
)
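test_grad_gpu delegates to test_grad with device=torch.device("cuda") and is tagged with the gpu marker. A minimal sketch for selecting only those tests, assuming a CUDA-capable machine and that the gpu marker is registered in the project's pytest configuration:

import pytest

# Sketch: run only tests tagged @pytest.mark.gpu. Marker registration and a
# working CUDA device are assumed.
pytest.main(["-m", "gpu"])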
