diff --git a/Project.toml b/Project.toml
index 8ce5c7280..80c9d999b 100644
--- a/Project.toml
+++ b/Project.toml
@@ -51,7 +51,7 @@ AdvancedMH = "0.6.8, 0.7"
 AdvancedPS = "0.4"
 AdvancedVI = "0.2"
 BangBang = "0.3"
-Bijectors = "0.12"
+Bijectors = "0.13.6"
 DataStructures = "0.18"
 Distributions = "0.23.3, 0.24, 0.25"
 DistributionsAD = "0.6"
diff --git a/test/contrib/inference/abstractmcmc.jl b/test/contrib/inference/abstractmcmc.jl
index b2b270e66..689687261 100644
--- a/test/contrib/inference/abstractmcmc.jl
+++ b/test/contrib/inference/abstractmcmc.jl
@@ -41,7 +41,7 @@ function initialize_mh(model)
 end
 
 @testset "External samplers" begin
-    @testset "AdvancedHMC.jl" begin
+    @turing_testset "AdvancedHMC.jl" begin
         for model in DynamicPPL.TestUtils.DEMO_MODELS
             # Need some functionality to initialize the sampler.
             # TODO: Remove this once the constructors in the respective packages become "lazy".
@@ -52,12 +52,13 @@ end
                 5_000;
                 n_adapts=1_000,
                 discard_initial=1_000,
-                rtol=0.2
+                rtol=0.2,
+                sampler_name="AdvancedHMC"
             )
         end
     end
 
-    @testset "AdvancedMH.jl" begin
+    @turing_testset "AdvancedMH.jl" begin
         for model in DynamicPPL.TestUtils.DEMO_MODELS
             # Need some functionality to initialize the sampler.
             # TODO: Remove this once the constructors in the respective packages become "lazy".
@@ -68,7 +69,8 @@ end
                 10_000;
                 discard_initial=1_000,
                 thinning=10,
-                rtol=0.2
+                rtol=0.2,
+                sampler_name="AdvancedMH"
             )
         end
     end
diff --git a/test/essential/ad.jl b/test/essential/ad.jl
index 8a0241a83..c00f76f12 100644
--- a/test/essential/ad.jl
+++ b/test/essential/ad.jl
@@ -84,8 +84,6 @@
         @model function dir()
             theta ~ Dirichlet(1 ./ fill(4, 4))
         end
-        Turing.setadbackend(:tracker)
-        sample(dir(), HMC(0.01, 1), 1000);
         Turing.setadbackend(:zygote)
         sample(dir(), HMC(0.01, 1), 1000)
         Turing.setadbackend(:reversediff)
@@ -99,8 +97,6 @@
         @model function wishart()
             theta ~ Wishart(4, Matrix{Float64}(I, 4, 4))
         end
-        Turing.setadbackend(:tracker)
-        sample(wishart(), HMC(0.01, 1), 1000);
         Turing.setadbackend(:reversediff)
         sample(wishart(), HMC(0.01, 1), 1000);
         Turing.setadbackend(:zygote)
@@ -109,8 +105,6 @@
         @model function invwishart()
             theta ~ InverseWishart(4, Matrix{Float64}(I, 4, 4))
         end
-        Turing.setadbackend(:tracker)
-        sample(invwishart(), HMC(0.01, 1), 1000);
         Turing.setadbackend(:reversediff)
         sample(invwishart(), HMC(0.01, 1), 1000);
         Turing.setadbackend(:zygote)
diff --git a/test/modes/OptimInterface.jl b/test/modes/OptimInterface.jl
index ea873ffee..2418037a4 100644
--- a/test/modes/OptimInterface.jl
+++ b/test/modes/OptimInterface.jl
@@ -1,38 +1,3 @@
-# TODO: Remove these once the equivalent is present in `DynamicPPL.TestUtils`.
-function likelihood_optima(::DynamicPPL.TestUtils.UnivariateAssumeDemoModels)
-    return (s=1/16, m=7/4)
-end
-function posterior_optima(::DynamicPPL.TestUtils.UnivariateAssumeDemoModels)
-    # TODO: Figure out exact for `s`.
-    return (s=0.907407, m=7/6)
-end
-
-function likelihood_optima(model::DynamicPPL.TestUtils.MultivariateAssumeDemoModels)
-    # Get some containers to fill.
-    vals = Random.rand(model)
-
-    # NOTE: These are "as close to zero as we can get".
-    vals.s[1] = 1e-32
-    vals.s[2] = 1e-32
-
-    vals.m[1] = 1.5
-    vals.m[2] = 2.0
-
-    return vals
-end
-function posterior_optima(model::DynamicPPL.TestUtils.MultivariateAssumeDemoModels)
-    # Get some containers to fill.
-    vals = Random.rand(model)
-
-    # TODO: Figure out exact for `s[1]`.
-    vals.s[1] = 0.890625
-    vals.s[2] = 1
-    vals.m[1] = 3/4
-    vals.m[2] = 1
-
-    return vals
-end
-
 # Used for testing how well it works with nested contexts.
 struct OverrideContext{C,T1,T2} <: DynamicPPL.AbstractContext
     context::C
@@ -57,7 +22,7 @@ function DynamicPPL.tilde_observe(context::OverrideContext, right, left, vi)
     return context.loglikelihood_weight, vi
 end
 
-@testset "OptimInterface.jl" begin
+@numerical_testset "OptimInterface.jl" begin
     @testset "MLE" begin
         Random.seed!(222)
         true_value = [0.0625, 1.75]
@@ -157,7 +122,7 @@ end
     # FIXME: Some models doesn't work for Tracker and ReverseDiff.
     if Turing.Essential.ADBACKEND[] === :forwarddiff
         @testset "MAP for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS
-            result_true = posterior_optima(model)
+            result_true = DynamicPPL.TestUtils.posterior_optima(model)
 
             @testset "$(nameof(typeof(optimizer)))" for optimizer in [LBFGS(), NelderMead()]
                 result = optimize(model, MAP(), optimizer)
@@ -188,7 +153,7 @@ end
         DynamicPPL.TestUtils.demo_dot_assume_matrix_dot_observe_matrix,
     ]
     @testset "MLE for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS
-        result_true = likelihood_optima(model)
+        result_true = DynamicPPL.TestUtils.likelihood_optima(model)
 
         # `NelderMead` seems to struggle with convergence here, so we exclude it.
         @testset "$(nameof(typeof(optimizer)))" for optimizer in [LBFGS(),]