Merge pull request #79 from r9y9/fix-tests
Fix tests
r9y9 authored Dec 25, 2018
2 parents e705991 + 517d160 commit 6b56b93
Showing 5 changed files with 45 additions and 52 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -26,7 +26,7 @@ before_install:
   - conda create -q -n test-environment "python=$TRAVIS_PYTHON_VERSION" $deps -c pytorch
   - source activate test-environment
   - pip install codecov flake8
-  - pip install scikit-learn==0.20.0
+  - pip install scikit-learn

 install:
   - pip install -e ".[test]"
4 changes: 3 additions & 1 deletion docs/changelog.rst
@@ -1,9 +1,10 @@
 Change log
 ==========

-v0.0.17 <2018-xx-xx>
+v0.0.17 <2018-12-25>
 --------------------

+- `#79`_: ModSpec update for PyTorch 1.0.0. Fix deprecations.
 - `#40`_: Add logo
 - `#76`_: MLPG update for PyTorch 1.0.0.

@@ -162,3 +163,4 @@ v0.0.1 <2017-08-14>
 .. _#72: https://github.com/r9y9/nnmnkwii/pull/72
 .. _#73: https://github.com/r9y9/nnmnkwii/pull/73
 .. _#76: https://github.com/r9y9/nnmnkwii/pull/76
+.. _#79: https://github.com/r9y9/nnmnkwii/pull/79
47 changes: 23 additions & 24 deletions nnmnkwii/autograd/_impl/modspec.py
@@ -10,48 +10,47 @@
 class ModSpec(Function):
     """Modulation spectrum computation ``f : (T, D) -> (N//2+1, D)``.

-    Attributes:
+    Args:
         n (int): DFT length.
         norm (bool): Normalize DFT output or not. See :obj:`numpy.fft.fft`.
     """

-    def __init__(self, n=2048, norm=None):
-        self.n = n
-        self.norm = norm
-
-    def forward(self, y):
+    @staticmethod
+    def forward(ctx, y, n, norm):
+        ctx.n = n
+        ctx.norm = norm
         assert y.dim() == 2
-        self.save_for_backward(y)
+        ctx.save_for_backward(y)

         y_np = y.detach().numpy()
-        ms = torch.from_numpy(_modspec(y_np, n=self.n, norm=self.norm))
+        ms = torch.from_numpy(_modspec(y_np, n=n, norm=norm))

         return ms

-    def backward(self, grad_output):
-        y, = self.saved_tensors
+    @staticmethod
+    def backward(ctx, grad_output):
+        y, = ctx.saved_tensors
         T, D = y.size()
-        assert grad_output.size() == torch.Size((self.n // 2 + 1, D))
+        assert grad_output.size() == torch.Size((ctx.n // 2 + 1, D))

         y_np = y.detach().numpy()
-        kt = -2 * np.pi / self.n * np.arange(self.n // 2 +
-                                             1)[:, None] * np.arange(T)
+        kt = -2 * np.pi / ctx.n * np.arange(ctx.n // 2 +
+                                            1)[:, None] * np.arange(T)

-        assert kt.shape == (self.n // 2 + 1, T)
+        assert kt.shape == (ctx.n // 2 + 1, T)
         cos_table = np.cos(kt)
         sin_table = np.sin(kt)

-        R = np.zeros((self.n // 2 + 1, D))
-        I = np.zeros((self.n // 2 + 1, D))
-        s_complex = np.fft.rfft(y_np, n=self.n, axis=0,
-                                norm=self.norm)  # DFT against time axis
-        assert s_complex.shape == (self.n // 2 + 1, D)
+        R = np.zeros((ctx.n // 2 + 1, D))
+        I = np.zeros((ctx.n // 2 + 1, D))
+        s_complex = np.fft.rfft(y_np, n=ctx.n, axis=0,
+                                norm=ctx.norm)  # DFT against time axis
+        assert s_complex.shape == (ctx.n // 2 + 1, D)
         R, I = s_complex.real, s_complex.imag

         grads = torch.zeros(T, D)
         C = 2  # normalization constant
-        if self.norm == "ortho":
-            C /= np.sqrt(self.n)
+        if ctx.norm == "ortho":
+            C /= np.sqrt(ctx.n)

         for d in range(D):
             r = R[:, d][:, None]
@@ -61,7 +60,7 @@ def backward(self, grad_output):
             grads[:, d] = torch.from_numpy(
                 grad_output[:, d].numpy().T.dot(grad))

-        return grads
+        return grads, None, None


 def modspec(y, n=2048, norm=None):
@@ -73,4 +72,4 @@ def modspec(y, n=2048, norm=None):
         norm (bool): Normalize DFT output or not. See :obj:`numpy.fft.fft`.
     """
-    return ModSpec(n=n, norm=norm)(y)
+    return ModSpec.apply(y, n, norm)
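
The substance of this PR is the migration above: PyTorch 1.0 dropped the legacy autograd.Function style, where constructor arguments lived on self and forward/backward were instance methods. In the new style both are static methods, saved state travels on a ctx object, former constructor arguments become extra inputs to Function.apply, and backward returns one gradient per forward input (None for non-differentiable ones, hence return grads, None, None). A minimal sketch of the pattern, using a hypothetical ScaledSquare function rather than nnmnkwii's ModSpec:

import torch
from torch.autograd import Function


class ScaledSquare(Function):
    @staticmethod
    def forward(ctx, x, scale):
        # Non-tensor arguments ride along on ctx instead of self.
        ctx.scale = scale
        ctx.save_for_backward(x)
        return scale * x ** 2

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        # One gradient per forward() input; None for the non-tensor scale.
        return grad_output * 2 * ctx.scale * x, None


x = torch.rand(4, requires_grad=True)
y = ScaledSquare.apply(x, 3.0)  # no instantiation; call .apply directly
y.sum().backward()

This is also why modspec() now calls ModSpec.apply(y, n, norm) instead of instantiating ModSpec(n=n, norm=norm).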
42 changes: 17 additions & 25 deletions tests/test_autograd.py
@@ -6,7 +6,6 @@
 from nnmnkwii import autograd as AF

 from torch.autograd import gradcheck
-from torch.autograd import Variable
 from torch import nn
 import torch
 import numpy as np
@@ -46,9 +45,10 @@ def test_functional_mlpg():
     variances = torch.ones(static_dim * len(windows))

     y = G.mlpg(means.numpy(), variances.numpy(), windows)
-    y = Variable(torch.from_numpy(y), requires_grad=False)
+    y = torch.from_numpy(y)

-    means = Variable(means, requires_grad=True)
+    means = means.clone()
+    means.requires_grad = True

     # mlpg
     y_hat = AF.mlpg(means, variances, windows)
@@ -79,14 +79,13 @@ def test_unit_variance_mlpg_gradcheck():
     for windows in _get_windows_set():
         torch.manual_seed(1234)
         # Means, input for MLPG
-        means = Variable(torch.rand(T, static_dim * len(windows)),
-                         requires_grad=True)
+        means = torch.rand(T, static_dim * len(windows), requires_grad=True)

         # Input for UnitVarianceMLPG
         reshaped_means = G.reshape_means(
             means.data.clone().numpy(), static_dim)
-        reshaped_means = Variable(torch.from_numpy(reshaped_means),
-                                  requires_grad=True)
+        reshaped_means = torch.from_numpy(reshaped_means)
+        reshaped_means.requires_grad = True

         # Compute MLPG matrix
         R = G.unit_variance_mlpg_matrix(windows, T).astype(np.float32)
@@ -135,15 +134,13 @@ def test_minibatch_unit_variance_mlpg_gradcheck():

     # Target
     y = G.mlpg(means.numpy(), np.ones(static_dim * len(windows)), windows)
-    y = Variable(torch.from_numpy(y), requires_grad=False)
+    y = torch.from_numpy(y)
     y_expanded = y.expand(batch_size, y.size(0), y.size(1))

-    # Pack into variables
-    means = Variable(means, requires_grad=True)
-    means_expanded = Variable(means_expanded, requires_grad=True)
-    reshaped_means = Variable(reshaped_means, requires_grad=True)
-    reshaped_means_expanded = Variable(
-        reshaped_means_expanded, requires_grad=True)
+    means.requires_grad = True
+    means_expanded.requires_grad = True
+    reshaped_means.requires_grad = True
+    reshaped_means_expanded.requires_grad = True

     # Case 1: 2d with reshaped means
     R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
@@ -191,8 +188,7 @@ def test_mlpg_gradcheck():

     for windows in _get_windows_set():
         torch.manual_seed(1234)
-        means = Variable(torch.rand(T, static_dim * len(windows)),
-                         requires_grad=True)
+        means = torch.rand(T, static_dim * len(windows), requires_grad=True)

         # Unit variances case
         variances = torch.ones(static_dim * len(windows)
@@ -217,8 +213,7 @@ def test_mlpg_variance_expand():

     for windows in _get_windows_set():
         torch.manual_seed(1234)
-        means = Variable(torch.rand(T, static_dim * len(windows)),
-                         requires_grad=True)
+        means = torch.rand(T, static_dim * len(windows), requires_grad=True)
         variances = torch.rand(static_dim * len(windows))
         variances_expanded = variances.expand(T, static_dim * len(windows))
         y = AF.mlpg(means, variances, windows)
@@ -231,21 +226,18 @@ def test_modspec_gradcheck():
     static_dim = 12
     T = 16
     torch.manual_seed(1234)
-    inputs = (Variable(torch.rand(T, static_dim), requires_grad=True),)
     n = 16

     for norm in [None, "ortho"]:
-        assert gradcheck(ModSpec(n=n, norm=norm), inputs, eps=1e-4, atol=1e-4)
+        inputs = (torch.rand(T, static_dim, requires_grad=True), n, norm)
+        assert gradcheck(ModSpec.apply, inputs, eps=1e-4, atol=1e-4)


 @attr("modspec")
 def test_modspec_gradcheck_large_n():
     static_dim = 12
     T = 16
     torch.manual_seed(1234)
-    inputs = (Variable(torch.rand(T, static_dim), requires_grad=True),)
-
     for n in [16, 32]:
         for norm in [None, "ortho"]:
-            assert gradcheck(ModSpec(n=n, norm=norm),
-                             inputs, eps=1e-4, atol=1e-4)
+            inputs = (torch.rand(T, static_dim, requires_grad=True), n, norm)
+            assert gradcheck(ModSpec.apply, inputs, eps=1e-4, atol=1e-4)
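
The test updates track the Variable/Tensor merge from PyTorch 0.4: torch.autograd.Variable is now a deprecated no-op wrapper, requires_grad is set on tensors directly, and gradcheck is pointed at ModSpec.apply with the full argument tuple, since gradcheck only differentiates tensor inputs that require gradients and passes plain Python values such as n and norm through untouched. A short sketch of the idioms involved (the shapes and the fn helper are illustrative, not from the test suite):

import torch
from torch.autograd import gradcheck

# Leaf tensors: set requires_grad at construction...
means = torch.rand(10, 6, requires_grad=True)

# ...or flip the flag afterwards on a freshly created leaf
# (what the tests do for tensors built via torch.from_numpy).
y = torch.from_numpy(means.detach().numpy().copy())
y.requires_grad = True

# gradcheck ignores non-tensor inputs; only t is differentiated here.
# float64 keeps the numerical Jacobian accurate.
def fn(t, k):
    return (k * t).sum()

t = torch.rand(3, dtype=torch.float64, requires_grad=True)
assert gradcheck(fn, (t, 2.0), eps=1e-6, atol=1e-5)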
2 changes: 1 addition & 1 deletion tests/test_preprocessing.py
@@ -173,7 +173,7 @@ def test_meanvar():
     # Inverse transform
     x = X[0]
     x_hat = P.inv_scale(P.scale(x, X_mean, X_std), X_mean, X_std)
-    assert np.allclose(x, x_hat, atol=1e-7)
+    assert np.allclose(x, x_hat, atol=1e-5)


 def test_minmax():
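
The looser tolerance is a float32 precision accommodation rather than a behavior change: a standardize/de-standardize round trip accumulates rounding error proportional to the data's magnitude, which can exceed the old atol=1e-7. A numpy-only sketch of the effect, under the assumption that P.scale and P.inv_scale reduce to the usual (x - mean) / std and x * std + mean arithmetic (values here are illustrative):

import numpy as np

rng = np.random.RandomState(0)
x = (rng.rand(1000, 24) * 10).astype(np.float32)

mean, std = x.mean(axis=0), x.std(axis=0)
x_hat = (x - mean) / std * std + mean  # scale, then inverse scale

# Max error is typically around 1e-6 here: well under 1e-5,
# but not reliably under the old 1e-7.
print(np.abs(x - x_hat).max())
assert np.allclose(x, x_hat, atol=1e-5)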
