
Commit 3531d2f: Remove formatting

sunxd3 committed Dec 6, 2023
1 parent 7876898 · commit 3531d2f
Showing 9 changed files with 74 additions and 74 deletions.
16 changes: 8 additions & 8 deletions benchmarks/benchmarks_suite.jl
@@ -18,7 +18,7 @@ data = [0, 1, 0, 1, 1, 1, 1, 1, 1, 1]


 @model function constrained_test(obs)
-    p ~ Beta(2, 2)
+    p ~ Beta(2,2)
     for i = 1:length(obs)
         obs[i] ~ Bernoulli(p)
     end
@@ -52,8 +52,8 @@ BenchmarkSuite["mnormal"] = BenchmarkGroup(["mnormal"])
 # Define the target distribution and its gradient

 @model function target(dim)
-    Θ = Vector{Real}(undef, dim)
-    θ ~ MvNormal(zeros(dim), I)
+    Θ = Vector{Real}(undef, dim)
+    θ ~ MvNormal(zeros(dim), I)
 end

 # Sampling parameter settings
@@ -67,14 +67,14 @@ BenchmarkSuite["mnormal"]["hmc"] = @benchmarkable sample($(target(dim)), $(HMC(0

 @model function mdemo(d, N)
     Θ = Vector(undef, N)
-    for n = 1:N
-        Θ[n] ~ d
-    end
+    for n=1:N
+        Θ[n] ~ d
+    end
 end

 dim2 = 250
-A = rand(Wishart(dim2, Matrix{Float64}(I, dim2, dim2)));
-d = MvNormal(zeros(dim2), A)
+A = rand(Wishart(dim2, Matrix{Float64}(I, dim2, dim2)));
+d = MvNormal(zeros(dim2), A)

 # ForwardDiff
 BenchmarkSuite["mnormal"]["forwarddiff"] = @benchmarkable sample($(mdemo(d, 1)), $(HMC(0.1, 5; adtype=AutoForwardDiff(; chunksize=0))), 5000)
16 changes: 8 additions & 8 deletions ext/TuringDynamicHMCExt.jl
@@ -13,7 +13,7 @@ else
     import ..DynamicHMC
     using ..Turing
     using ..Turing: AbstractMCMC, Random, LogDensityProblems, DynamicPPL
-    using ..Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS
+    using ..Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS
 end

 """
@@ -25,22 +25,22 @@ To use it, make sure you have DynamicHMC package (version >= 2) loaded:
 ```julia
 using DynamicHMC
 ```
-"""
+"""
 struct DynamicNUTS{AD,space,T<:DynamicHMC.NUTS} <: Turing.Inference.Hamiltonian
     sampler::T
     adtype::AD
 end

 function DynamicNUTS(
-    spl::DynamicHMC.NUTS=DynamicHMC.NUTS(),
-    space::Tuple=();
-    adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(; chunksize=0)
+    spl::DynamicHMC.NUTS = DynamicHMC.NUTS(),
+    space::Tuple = ();
+    adtype::ADTypes.AbstractADType = ADTypes.AutoForwardDiff(; chunksize=0)
 )
     return DynamicNUTS{typeof(adtype),space,typeof(spl)}(spl, adtype)
 end
 Turing.externalsampler(spl::DynamicHMC.NUTS) = DynamicNUTS(spl)

-DynamicPPL.getspace(::DynamicNUTS{<:Any,space}) where {space} = space
+DynamicPPL.getspace(::DynamicNUTS{<:Any, space}) where {space} = space

 """
     DynamicNUTSState
@@ -82,8 +82,8 @@ function DynamicPPL.initialstep(
         rng,
         ℓ,
         0;
-        initialization=(q=vi[spl],),
-        reporter=DynamicHMC.NoProgressReport(),
+        initialization = (q = vi[spl],),
+        reporter = DynamicHMC.NoProgressReport(),
     )
     steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
     Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
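Note on the file above: its docstring says the DynamicHMC bridge only needs `using DynamicHMC`. A minimal end-to-end sketch, not part of this commit — the `gdemo` model is the standard Turing demo, written out here so the snippet is self-contained, and `Turing.externalsampler` is the hook visible in the hunk above:

```julia
using Turing, DynamicHMC

# Standard demo model: normal observations with unknown mean and variance.
@model function gdemo(x)
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
    for i in eachindex(x)
        x[i] ~ Normal(m, sqrt(s))
    end
end

# `externalsampler` wraps DynamicHMC.NUTS() in the DynamicNUTS algorithm defined above,
# after which the ordinary `sample` entry point drives it.
chain = sample(gdemo([1.5, 2.0]), Turing.externalsampler(DynamicHMC.NUTS()), 1000)
```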
34 changes: 17 additions & 17 deletions src/mcmc/hmc.jl
@@ -61,7 +61,7 @@ sample(gdemo([1.5, 2]), HMC(0.1, 10), 1000)
 sample(gdemo([1.5, 2]), HMC(0.01, 10), 1000)
 ```
 """
-struct HMC{AD,space,metricT<:AHMC.AbstractMetric} <: StaticHamiltonian
+struct HMC{AD, space, metricT <: AHMC.AbstractMetric} <: StaticHamiltonian
     ϵ::Float64 # leapfrog step size
     n_leapfrog::Int # leapfrog step number
     adtype::AD
@@ -77,7 +77,7 @@ function HMC(
     metricT=AHMC.UnitEuclideanMetric,
     adtype::ADTypes.AbstractADType=AutoForwardDiff(; chunksize=0),
 )
-    return HMC(ϵ, n_leapfrog, metricT, space; adtype=adtype)
+    return HMC(ϵ, n_leapfrog, metricT, space; adtype = adtype)
 end

 DynamicPPL.initialsampler(::Sampler{<:Hamiltonian}) = SampleFromUniform()
@@ -115,9 +115,9 @@ function AbstractMCMC.sample(
         end

         return AbstractMCMC.mcmcsample(rng, model, sampler, N;
-            chain_type=chain_type, progress=progress,
-            nadapts=_nadapts, discard_initial=_discard_initial,
-            kwargs...)
+            chain_type=chain_type, progress=progress,
+            nadapts=_nadapts, discard_initial=_discard_initial,
+            kwargs...)
     else
         return AbstractMCMC.mcmcsample(
             rng, model, sampler, N;
@@ -205,7 +205,7 @@ function DynamicPPL.initialstep(
     if spl.alg isa AdaptiveHamiltonian
         hamiltonian, kernel, _ =
             AHMC.adapt!(hamiltonian, kernel, adaptor,
-                        1, nadapts, t.z.θ, t.stat.acceptance_rate)
+                        1, nadapts, t.z.θ, t.stat.acceptance_rate)
     end

     # Update `vi` based on acceptance
@@ -244,7 +244,7 @@ function AbstractMCMC.step(
     if spl.alg isa AdaptiveHamiltonian
         hamiltonian, kernel, _ =
             AHMC.adapt!(hamiltonian, state.kernel, state.adaptor,
-                        i, nadapts, t.z.θ, t.stat.acceptance_rate)
+                        i, nadapts, t.z.θ, t.stat.acceptance_rate)
     else
         kernel = state.kernel
     end
@@ -308,11 +308,11 @@ Hoffman, Matthew D., and Andrew Gelman. "The No-U-turn sampler: adaptively
 setting path lengths in Hamiltonian Monte Carlo." Journal of Machine Learning
 Research 15, no. 1 (2014): 1593-1623.
 """
-struct HMCDA{AD,space,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian
-    n_adapts::Int # number of samples with adaption for ϵ
-    δ::Float64 # target accept rate
-    λ::Float64 # target leapfrog length
-    ϵ::Float64 # (initial) step size
+struct HMCDA{AD, space, metricT <: AHMC.AbstractMetric} <: AdaptiveHamiltonian
+    n_adapts :: Int # number of samples with adaption for ϵ
+    δ :: Float64 # target accept rate
+    λ :: Float64 # target leapfrog length
+    ϵ :: Float64 # (initial) step size
     adtype::AD
 end

@@ -327,7 +327,7 @@ function HMCDA(
     metricT=AHMC.UnitEuclideanMetric,
     adtype::ADTypes.AbstractADType=AutoForwardDiff(; chunksize=0),
 )
-    return HMCDA(-1, δ, λ, init_ϵ, metricT, (); adtype=adtype)
+    return HMCDA(-1, δ, λ, init_ϵ, metricT, (); adtype = adtype)
 end

 function HMCDA(
@@ -349,7 +349,7 @@ function HMCDA(
     metricT=AHMC.UnitEuclideanMetric,
     adtype::ADTypes.AbstractADType=AutoForwardDiff(; chunksize=0),
 )
-    return HMCDA(n_adapts, δ, λ, init_ϵ, metricT, space; adtype=adtype)
+    return HMCDA(n_adapts, δ, λ, init_ϵ, metricT, space; adtype = adtype)
 end


@@ -436,7 +436,7 @@ function NUTS(; kwargs...)
 end

 for alg in (:HMC, :HMCDA, :NUTS)
-    @eval getmetricT(::$alg{<:Any,<:Any,metricT}) where {metricT} = metricT
+    @eval getmetricT(::$alg{<:Any, <:Any, metricT}) where {metricT} = metricT
 end

 #####
@@ -488,7 +488,7 @@ end
 function DynamicPPL.dot_assume(
     rng,
     spl::Sampler{<:Hamiltonian},
-    dists::Union{Distribution,AbstractArray{<:Distribution}},
+    dists::Union{Distribution, AbstractArray{<:Distribution}},
     vns::AbstractArray{<:VarName},
     var::AbstractArray,
     vi,
@@ -508,7 +508,7 @@ end

 function DynamicPPL.dot_observe(
     spl::Sampler{<:Hamiltonian},
-    ds::Union{Distribution,AbstractArray{<:Distribution}},
+    ds::Union{Distribution, AbstractArray{<:Distribution}},
     value::AbstractArray,
     vi,
 )
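For orientation: the HMC docstring context above already shows the call pattern, and the HMCDA struct documents its tuning parameters. A short sketch of how both samplers are typically invoked, reusing the `gdemo` model from the earlier sketch — the HMCDA arguments (200 adaptation steps, target acceptance 0.65, target leapfrog length 0.3) are illustrative values, not taken from this commit:

```julia
using Turing

# Static HMC: fixed leapfrog step size ϵ = 0.1 and 10 leapfrog steps per iteration,
# mirroring the docstring example retained in the hunk above.
chain_hmc = sample(gdemo([1.5, 2.0]), HMC(0.1, 10), 1000)

# HMCDA: dual-averaging adaptation of ϵ over 200 steps, targeting acceptance rate δ = 0.65
# with target leapfrog trajectory length λ = 0.3 (see the struct fields above).
chain_hmcda = sample(gdemo([1.5, 2.0]), HMCDA(200, 0.65, 0.3), 1000)
```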
12 changes: 6 additions & 6 deletions src/mcmc/sghmc.jl
@@ -47,7 +47,7 @@ function SGHMC(
     return SGHMC{typeof(adtype),space,typeof(_learning_rate)}(_learning_rate, _momentum_decay, adtype)
 end

-struct SGHMCState{L,V<:AbstractVarInfo,T<:AbstractVector{<:Real}}
+struct SGHMCState{L,V<:AbstractVarInfo, T<:AbstractVector{<:Real}}
     logdensity::L
     vi::V
     velocity::T
@@ -134,7 +134,7 @@ struct PolynomialStepsize{T<:Real}
     "Decay rate of step size in (0.5, 1]."
     γ::T

-    function PolynomialStepsize{T}(a::T, b::T, γ::T) where {T}
+    function PolynomialStepsize{T}(a::T, b::T, γ::T) where T
         0.5 < γ ≤ 1 || error("the decay rate `γ` has to be in (0.5, 1]")
         return new{T}(a, b, γ)
     end
@@ -153,7 +153,7 @@ a (b + t)^{-γ}.
 function PolynomialStepsize(a::T, b::T, γ::T) where {T<:Real}
     return PolynomialStepsize{T}(a, b, γ)
 end
-function PolynomialStepsize(a::Real, b::Real=0, γ::Real=0.55)
+function PolynomialStepsize(a::Real, b::Real = 0, γ::Real = 0.55)
     return PolynomialStepsize(promote(a, b, γ)...)
 end

@@ -183,8 +183,8 @@ See also: [`PolynomialStepsize`](@ref)
 """
 function SGLD(
     space::Symbol...;
-    stepsize=PolynomialStepsize(0.01),
-    adtype::ADTypes.AbstractADType=AutoForwardDiff(; chunksize=0),
+    stepsize = PolynomialStepsize(0.01),
+    adtype::ADTypes.AbstractADType = AutoForwardDiff(; chunksize=0),
 )
     return SGLD{typeof(adtype),space,typeof(stepsize)}(stepsize, adtype)
 end
@@ -204,7 +204,7 @@ function SGLDTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, stepsize)
     return SGLDTransition(theta, lp, stepsize)
 end

-metadata(t::SGLDTransition) = (lp=t.lp, SGLD_stepsize=t.stepsize)
+metadata(t::SGLDTransition) = (lp = t.lp, SGLD_stepsize = t.stepsize)

 DynamicPPL.getlogp(t::SGLDTransition) = t.lp

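Aside: the docstring fragment `a (b + t)^{-γ}` above is the step-size schedule SGLD uses. A quick numeric check with illustrative values that mirror the defaults visible in this file (`PolynomialStepsize(0.01)`, i.e. a = 0.01, b = 0, γ = 0.55):

```julia
# Step size at iteration t under the polynomial schedule a * (b + t)^(-γ).
# The inner constructor shown above enforces γ ∈ (0.5, 1].
a, b, γ = 0.01, 0.0, 0.55
stepsize(t) = a * (b + t)^(-γ)

stepsize(1)       # 0.01: the full base step size at the first iteration
stepsize(10_000)  # ≈ 6.3e-5: a slow polynomial decay of the SGLD step size
```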
14 changes: 7 additions & 7 deletions test/mcmc/Inference.jl
@@ -122,24 +122,24 @@
     chains = sample(gdemo_d(), Prior(), N)
     @test chains isa MCMCChains.Chains
     @test size(chains) == (N, 3, 1)
-    @test mean(chains, :s) ≈ 3 atol = 0.1
-    @test mean(chains, :m) ≈ 0 atol = 0.1
+    @test mean(chains, :s) ≈ 3 atol=0.1
+    @test mean(chains, :m) ≈ 0 atol=0.1

     Random.seed!(100)
     chains = sample(gdemo_d(), Prior(), MCMCThreads(), N, 4)
     @test chains isa MCMCChains.Chains
     @test size(chains) == (N, 3, 4)
-    @test mean(chains, :s) ≈ 3 atol = 0.1
-    @test mean(chains, :m) ≈ 0 atol = 0.1
+    @test mean(chains, :s) ≈ 3 atol=0.1
+    @test mean(chains, :m) ≈ 0 atol=0.1

     Random.seed!(100)
-    chains = sample(gdemo_d(), Prior(), N; chain_type=Vector{NamedTuple})
+    chains = sample(gdemo_d(), Prior(), N; chain_type = Vector{NamedTuple})
     @test chains isa Vector{<:NamedTuple}
     @test length(chains) == N
     @test all(length(x) == 3 for x in chains)
     @test all(haskey(x, :lp) for x in chains)
-    @test mean(x[:s][1] for x in chains) ≈ 3 atol = 0.1
-    @test mean(x[:m][1] for x in chains) ≈ 0 atol = 0.1
+    @test mean(x[:s][1] for x in chains) ≈ 3 atol=0.1
+    @test mean(x[:m][1] for x in chains) ≈ 0 atol=0.1
 end

 @testset "chain ordering" begin
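Side note: the `≈ 3` and `≈ 0` targets in this hunk are simply the prior means of the gdemo parameters, assuming the standard priors s ~ InverseGamma(2, 3) and m ~ Normal(0, √s) (the model itself is not shown in this diff):

```julia
using Distributions

mean(InverseGamma(2, 3))   # 3.0 — E[s] = β / (α - 1) = 3 / 1, the `≈ 3` target
mean(Normal(0, sqrt(3)))   # 0.0 — m is centred at zero whatever s is, the `≈ 0` target
```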
18 changes: 9 additions & 9 deletions test/mcmc/gibbs.jl
@@ -34,21 +34,21 @@
     Random.seed!(100)
     alg = Gibbs(CSMC(15, :s), HMC(0.2, 4, :m; adtype=adbackend))
     chain = sample(gdemo(1.5, 2.0), alg, 10_000)
-    check_numerical(chain, [:s, :m], [49 / 24, 7 / 6], atol=0.15)
+    check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.15)

     Random.seed!(100)

     alg = Gibbs(MH(:s), HMC(0.2, 4, :m; adtype=adbackend))
     chain = sample(gdemo(1.5, 2.0), alg, 10_000)
-    check_numerical(chain, [:s, :m], [49 / 24, 7 / 6], atol=0.1)
+    check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.1)

     alg = Gibbs(CSMC(15, :s), ESS(:m))
     chain = sample(gdemo(1.5, 2.0), alg, 10_000)
-    check_numerical(chain, [:s, :m], [49 / 24, 7 / 6], atol=0.1)
+    check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.1)

     alg = CSMC(15)
     chain = sample(gdemo(1.5, 2.0), alg, 10_000)
-    check_numerical(chain, [:s, :m], [49 / 24, 7 / 6], atol=0.1)
+    check_numerical(chain, [:s, :m], [49/24, 7/6], atol=0.1)

     Random.seed!(200)
     gibbs = Gibbs(PG(15, :z1, :z2, :z3, :z4), HMC(0.15, 3, :mu1, :mu2; adtype=adbackend))
Expand Down Expand Up @@ -94,29 +94,29 @@
end

alg = Gibbs(MH(:s), HMC(0.2, 4, :m; adtype=adbackend))
sample(model, alg, 100; callback=callback)
sample(model, alg, 100; callback = callback)
end
@turing_testset "dynamic model" begin
@model function imm(y, alpha, ::Type{M}=Vector{Float64}) where {M}
N = length(y)
rpm = DirichletProcess(alpha)

z = zeros(Int, N)
cluster_counts = zeros(Int, N)
fill!(cluster_counts, 0)

for i in 1:N
z[i] ~ ChineseRestaurantProcess(rpm, cluster_counts)
cluster_counts[z[i]] += 1
end

Kmax = findlast(!iszero, cluster_counts)
m = M(undef, Kmax)
for k = 1:Kmax
m[k] ~ Normal(1.0, 1.0)
end
end
model = imm(randn(100), 1.0)
model = imm(randn(100), 1.0);
# https://github.com/TuringLang/Turing.jl/issues/1725
# sample(model, Gibbs(MH(:z), HMC(0.01, 4, :m)), 100);
sample(model, Gibbs(PG(10, :z), HMC(0.01, 4, :m; adtype=adbackend)), 100)
Expand Down
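For reference: the `[49/24, 7/6]` targets checked by `check_numerical` above are the analytic posterior means of `s` and `m` in the standard `gdemo` model — assuming s ~ InverseGamma(2, 3), m ~ Normal(0, √s) and observations 1.5 and 2.0, which this diff does not show. A quick conjugate-update check in exact rational arithmetic:

```julia
# Normal–inverse-gamma conjugate update for gdemo with observations [1.5, 2.0].
# Prior: s ~ InverseGamma(α₀ = 2, β₀ = 3), m | s ~ Normal(μ₀ = 0, √(s/κ₀)) with κ₀ = 1.
α₀, β₀, μ₀, κ₀ = 2//1, 3//1, 0//1, 1//1
x = [3//2, 2//1]                       # the data 1.5 and 2.0 as exact rationals
n, x̄ = length(x), sum(x) // length(x)

κₙ = κ₀ + n
μₙ = (κ₀ * μ₀ + n * x̄) / κₙ                                     # posterior mean of m
αₙ = α₀ + n // 2
βₙ = β₀ + sum(abs2, x .- x̄) / 2 + κ₀ * n * (x̄ - μ₀)^2 / (2κₙ)
s̄ = βₙ / (αₙ - 1)                                               # posterior mean of s

(μₙ, s̄) == (7//6, 49//24)   # true — exactly the targets used above
```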

0 comments on commit 3531d2f
