1380 pytorch 260 #1382

Open · wants to merge 2 commits into main
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
"scikit-learn",
"scipy<1.13",
"tensorboard",
"torch>=1.13.0, <2.6.0",
"torch>=1.13.0",
"tqdm",
"pymc>=5.0.0",
"zuko>=1.2.0",
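Note: dropping the `<2.6.0` pin is what allows sbi to install alongside PyTorch 2.6; the remaining changes in this PR adapt the code to the stricter defaults and type checking that come with it. A minimal sketch (not part of the PR) to confirm the installed torch satisfies the relaxed constraint:

```python
# Check the installed torch against the relaxed requirement in pyproject.toml.
from importlib.metadata import version
from packaging.version import Version

torch_version = Version(version("torch"))
assert torch_version >= Version("1.13.0"), "sbi requires torch>=1.13.0"
print(f"torch {torch_version} satisfies 'torch>=1.13.0'")
```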
4 changes: 2 additions & 2 deletions sbi/inference/abc/smcabc.py
@@ -679,8 +679,8 @@
)

elif self.kernel == "uniform":
-low = thetas - self.kernel_variance
-high = thetas + self.kernel_variance
+low = thetas - self.kernel_variance  # type: ignore
+high = thetas + self.kernel_variance  # type: ignore

[Codecov / codecov/patch: added lines #L682–L683 in sbi/inference/abc/smcabc.py were not covered by tests.]
# Move batch shape to event shape to get Uniform that is multivariate in
# parameter dimension.
return BoxUniform(low=low, high=high)
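Note: as the comment above says, `BoxUniform` moves the batch shape into the event shape, yielding a single uniform that is multivariate in the parameter dimension. A minimal sketch of the kernel built here, with stand-in values for `thetas` and `self.kernel_variance`:

```python
import torch
from sbi.utils import BoxUniform

thetas = torch.zeros(3)              # stand-in particle with 3 parameters
kernel_variance = torch.tensor(0.5)  # stand-in kernel width

# Each parameter gets an independent uniform interval around the particle;
# BoxUniform treats the last dimension as the event dimension.
kernel = BoxUniform(low=thetas - kernel_variance, high=thetas + kernel_variance)
samples = kernel.sample((10,))       # shape (10, 3)
```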
21 changes: 11 additions & 10 deletions sbi/inference/trainers/npe/npe_a.py
@@ -425,7 +425,7 @@
logits_pp,
m_pp,
prec_pp,
-) = proposal.posterior_estimator._posthoc_correction(default_x)
+) = proposal.posterior_estimator._posthoc_correction(default_x)  # type: ignore
self._logits_pp, self._m_pp, self._prec_pp = (
logits_pp.detach(),
m_pp.detach(),
@@ -536,7 +536,7 @@
num_samples, logits_p, m_p, prec_factors_p
)

-embedded_context = self._neural_net.net._embedding_net(x)
+embedded_context = self._neural_net.net._embedding_net(x)  # type: ignore

[Codecov / codecov/patch: added line #L539 in sbi/inference/trainers/npe/npe_a.py was not covered by tests.]
if embedded_context is not None:
# Merge the context dimension with sample dimension in order to
# apply the transform.
@@ -546,8 +546,9 @@
)

theta, _ = self._neural_net.net._transform.inverse(
-    theta, context=embedded_context
-)
+    theta,  # type: ignore
+    context=embedded_context,
+)  # type: ignore

if embedded_context is not None:
# Split the context dimension from sample dimension.
@@ -574,9 +575,9 @@
x = x.squeeze(dim=0)

# Evaluate the density estimator.
-embedded_x = self._neural_net.net._embedding_net(x)
+embedded_x = self._neural_net.net._embedding_net(x)  # type: ignore

[Codecov / codecov/patch: added line #L578 in sbi/inference/trainers/npe/npe_a.py was not covered by tests.]
dist = self._neural_net.net._distribution # defined to avoid black formatting.
-logits_d, m_d, prec_d, _, _ = dist.get_mixture_components(embedded_x)
+logits_d, m_d, prec_d, _, _ = dist.get_mixture_components(embedded_x)  # type: ignore

[Codecov / codecov/patch: added line #L580 in sbi/inference/trainers/npe/npe_a.py was not covered by tests.]
norm_logits_d = logits_d - torch.logsumexp(logits_d, dim=-1, keepdim=True)
norm_logits_d = atleast_2d(norm_logits_d)

@@ -704,8 +705,8 @@
prior will not be exactly have mean=0 and std=1.
"""
if self.z_score_theta:
-scale = self._neural_net.net._transform._transforms[0]._scale
-shift = self._neural_net.net._transform._transforms[0]._shift
+scale = self._neural_net.net._transform._transforms[0]._scale  # type: ignore
+shift = self._neural_net.net._transform._transforms[0]._shift  # type: ignore

# Following the definition of the linear transform in
# `standardizing_transform` in `sbiutils.py`:
@@ -739,7 +740,7 @@
"""Return potentially standardized theta if z-scoring was requested."""

if self.z_score_theta:
-theta, _ = self._neural_net.net._transform(theta)
+theta, _ = self._neural_net.net._transform(theta)  # type: ignore

[Codecov / codecov/patch: added line #L743 in sbi/inference/trainers/npe/npe_a.py was not covered by tests.]

return theta

@@ -784,7 +785,7 @@

precisions_p = precisions_d_rep - precisions_pp_rep
if isinstance(self._maybe_z_scored_prior, MultivariateNormal):
-precisions_p += self._maybe_z_scored_prior.precision_matrix
+precisions_p += self._maybe_z_scored_prior.precision_matrix  # type: ignore

[Codecov / codecov/patch: added line #L788 in sbi/inference/trainers/npe/npe_a.py was not covered by tests.]

# Check if precision matrix is positive definite.
for _, batches in enumerate(precisions_p):
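Note: most `# type: ignore` comments in this file exist because `self._neural_net.net` is typed as a plain `nn.Module`, so attributes such as `_embedding_net`, `_distribution`, and `_transform` are invisible to static checkers. One alternative, sketched below under the assumption that the net really wraps a pyknos MDN (this is not what the PR does), is to narrow the type once with `typing.cast`:

```python
from typing import cast

from pyknos.mdn.mdn import MultivariateGaussianMDN
from torch import Tensor, nn

def mixture_components(net: nn.Module, embedded_x: Tensor):
    # cast() has no runtime cost; it records the same assumption the inline
    # "# type: ignore" comments make implicitly: the net exposes a
    # _distribution attribute with get_mixture_components().
    dist = cast(MultivariateGaussianMDN, getattr(net, "_distribution"))
    return dist.get_mixture_components(embedded_x)
```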
6 changes: 3 additions & 3 deletions sbi/inference/trainers/npe/npe_c.py
@@ -423,11 +423,11 @@
# Evaluate the proposal. MDNs do not have functionality to run the embedding_net
# and then get the mixture_components (**without** calling log_prob()). Hence,
# we call them separately here.
-encoded_x = proposal.posterior_estimator.net._embedding_net(proposal.default_x)
+encoded_x = proposal.posterior_estimator.net._embedding_net(proposal.default_x)  # type: ignore

[Codecov / codecov/patch: added line #L426 in sbi/inference/trainers/npe/npe_c.py was not covered by tests.]
dist = (
proposal.posterior_estimator.net._distribution
) # defined to avoid ugly black formatting.
-logits_p, m_p, prec_p, _, _ = dist.get_mixture_components(encoded_x)
+logits_p, m_p, prec_p, _, _ = dist.get_mixture_components(encoded_x)  # type: ignore

[Codecov / codecov/patch: added line #L430 in sbi/inference/trainers/npe/npe_c.py was not covered by tests.]
norm_logits_p = logits_p - torch.logsumexp(logits_p, dim=-1, keepdim=True)

# Evaluate the density estimator.
@@ -545,7 +545,7 @@

precisions_pp = precisions_p_rep + precisions_d_rep
if isinstance(self._maybe_z_scored_prior, MultivariateNormal):
-precisions_pp -= self._maybe_z_scored_prior.precision_matrix
+precisions_pp -= self._maybe_z_scored_prior.precision_matrix  # type: ignore

[Codecov / codecov/patch: added line #L548 in sbi/inference/trainers/npe/npe_c.py was not covered by tests.]

covariances_pp = torch.inverse(precisions_pp)

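Note: the precision arithmetic follows standard Gaussian algebra: multiplying Gaussian densities adds their precision matrices and dividing subtracts them, so the z-scored prior's precision is removed exactly once. A small numeric illustration with stand-in matrices:

```python
import torch

prec_proposal = 4.0 * torch.eye(2)  # stand-in proposal precision
prec_density = 3.0 * torch.eye(2)   # stand-in density-estimator precision
prec_prior = torch.eye(2)           # stand-in prior precision

# Precisions of the combined Gaussian: add for products, subtract for quotients.
precisions_pp = prec_proposal + prec_density - prec_prior
covariances_pp = torch.inverse(precisions_pp)
```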
4 changes: 2 additions & 2 deletions sbi/neural_nets/estimators/flowmatching_estimator.py
@@ -66,7 +66,7 @@ def embedding_net(self):

def forward(self, input: Tensor, condition: Tensor, t: Tensor) -> Tensor:
# positional encoding of time steps
-t = self.freqs * t[..., None]
+t = self.freqs * t[..., None]  # type: ignore
t = torch.cat((t.cos(), t.sin()), dim=-1)

# embed the input and condition
@@ -162,5 +162,5 @@ def flow(self, condition: Tensor) -> NormalizingFlow:

return NormalizingFlow(
transform=transform,
-base=DiagNormal(self.zeros, self.ones).expand(condition.shape[:-1]),
+base=DiagNormal(self.zeros, self.ones).expand(condition.shape[:-1]),  # type: ignore
)
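Note: the annotated `t[..., None]` line is part of a sinusoidal time embedding: each time step is scaled by a bank of frequencies and passed through cos/sin. A standalone sketch with an assumed frequency bank in place of `self.freqs`:

```python
import torch

freqs = torch.tensor([1.0, 2.0, 4.0])  # assumed frequency bank
t = torch.rand(8)                      # batch of 8 time steps

t_proj = freqs * t[..., None]          # broadcast to shape (8, 3)
t_emb = torch.cat((t_proj.cos(), t_proj.sin()), dim=-1)  # shape (8, 6)
```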
2 changes: 1 addition & 1 deletion sbi/neural_nets/estimators/nflows_flow.py
@@ -70,7 +70,7 @@
input = input.reshape(-1, input.shape[-1])
condition = condition.reshape(-1, *self.condition_shape)

-noise, _ = self.net._transorm(input, context=condition)
+noise, _ = self.net._transorm(input, context=condition)  # type: ignore

[Codecov / codecov/patch: added line #L73 in sbi/neural_nets/estimators/nflows_flow.py was not covered by tests.]
noise = noise.reshape(batch_shape)
return noise

6 changes: 3 additions & 3 deletions sbi/neural_nets/estimators/score_estimator.py
@@ -228,7 +228,7 @@ def approx_marginal_mean(self, times: Tensor) -> Tensor:
Returns:
Approximate marginal mean at a given time.
"""
-return self.mean_t_fn(times) * self.mean_0
+return self.mean_t_fn(times) * self.mean_0  # type: ignore

def approx_marginal_std(self, times: Tensor) -> Tensor:
r"""Approximate the marginal standard deviation of the target distribution at a
@@ -240,8 +240,8 @@ def approx_marginal_std(self, times: Tensor) -> Tensor:
Returns:
Approximate marginal standard deviation at a given time.
"""
-vars = self.mean_t_fn(times) ** 2 * self.std_0**2 + self.std_fn(times) ** 2
-return torch.sqrt(vars)
+variances = self.mean_t_fn(times) ** 2 * self.std_0**2 + self.std_fn(times) ** 2  # type: ignore
+return torch.sqrt(variances)

def mean_t_fn(self, times: Tensor) -> Tensor:
r"""Conditional mean function, E[xt|x0], specifying the "mean factor" at a given
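Note: besides no longer shadowing the builtin `vars`, the renamed line computes Var[x_t] = mean_t(t)^2 * Var[x_0] + std(t)^2, the variance of an affine-Gaussian corruption of the data. A numeric sketch with stand-in schedules for `mean_t_fn` and `std_fn`:

```python
import torch

std_0 = torch.tensor(2.0)                    # stand-in std of the data
times = torch.linspace(0.0, 1.0, 5)

mean_t = torch.exp(-0.5 * times)             # assumed mean factor E[x_t | x_0] / x_0
std_t = torch.sqrt(1.0 - torch.exp(-times))  # assumed diffusion std

variances = mean_t**2 * std_0**2 + std_t**2
marginal_std = torch.sqrt(variances)
```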
4 changes: 2 additions & 2 deletions sbi/neural_nets/net_builders/score_nets.py
@@ -298,8 +298,8 @@ def __init__(
)

# Initialize the last layer to zero
-self.ada_ln[-1].weight.data.zero_()
-self.ada_ln[-1].bias.data.zero_()
+self.ada_ln[-1].weight.data.zero_()  # type: ignore
+self.ada_ln[-1].bias.data.zero_()  # type: ignore

# MLP block
# NOTE: This can be made more flexible to support layer types.
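Note: zero-initialising the last layer of the `ada_ln` block makes the adaptive modulation start out as an identity, which helps stabilise early training. The `# type: ignore` is needed because indexing `nn.Sequential` is typed to return `nn.Module`, which has no `weight` or `bias`; an `isinstance` check (sketched below, not the PR's approach) narrows the type instead:

```python
import torch.nn as nn

ada_ln = nn.Sequential(nn.SiLU(), nn.Linear(64, 3 * 64))  # stand-in block

last = ada_ln[-1]
assert isinstance(last, nn.Linear)  # runtime check that also narrows the static type
last.weight.data.zero_()
last.bias.data.zero_()
```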
2 changes: 1 addition & 1 deletion tests/vi_test.py
@@ -261,7 +261,7 @@ def test_pickle_support(q: str):

with tempfile.NamedTemporaryFile(suffix=".pt") as f:
torch.save(posterior, f.name)
-posterior_loaded = torch.load(f.name)
+posterior_loaded = torch.load(f.name, weights_only=False)
assert (posterior._x == posterior_loaded._x).all(), (
"Mhh, something with the pickled is strange"
)
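Note: PyTorch 2.6 changed the default of `torch.load` to `weights_only=True`, which refuses to unpickle arbitrary Python objects such as sbi posteriors; `weights_only=False` restores the old behaviour and should only be used on trusted files. A minimal sketch of the opt-in outside the test:

```python
import tempfile

import torch

obj = {"note": "an arbitrary picklable object", "x": torch.ones(3)}

with tempfile.NamedTemporaryFile(suffix=".pt") as f:
    torch.save(obj, f.name)
    # Explicit opt-in to full unpickling; needed for sbi posteriors on torch>=2.6.
    loaded = torch.load(f.name, weights_only=False)
```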