diff --git a/Project.toml b/Project.toml
index 7e62251..c80c9f0 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "MCIntegration"
 uuid = "ea1e2de9-7db7-4b42-91ee-0cd1bf6df167"
 authors = ["Kun Chen", "Xiansheng Cai", "Pengcheng Hou"]
-version = "0.4.0"
+version = "0.4.1"
 
 [deps]
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
diff --git a/example/benchmark/cuba/benchmark.jl b/example/benchmark/cuba/benchmark.jl
index b4cfd8d..6131836 100644
--- a/example/benchmark/cuba/benchmark.jl
+++ b/example/benchmark/cuba/benchmark.jl
@@ -72,6 +72,20 @@ function test2(x, c)
     t11(x[1], x[2], x[3])
 end
 
+@inbounds function test3(x, f, c)
+    @inbounds f[1] = t1(x[1], x[2], x[3])
+    @inbounds f[2] = t2(x[1], x[2], x[3])
+    @inbounds f[3] = t3(x[1], x[2], x[3])
+    @inbounds f[4] = t4(x[1], x[2], x[3])
+    @inbounds f[5] = t5(x[1], x[2], x[3])
+    @inbounds f[6] = t6(x[1], x[2], x[3])
+    @inbounds f[7] = t7(x[1], x[2], x[3])
+    @inbounds f[8] = t8(x[1], x[2], x[3])
+    @inbounds f[9] = t9(x[1], x[2], x[3])
+    @inbounds f[10] = t10(x[1], x[2], x[3])
+    @inbounds f[11] = t11(x[1], x[2], x[3])
+end
+
 @info "Performance of Cuba.jl:"
 for alg in (vegas, suave, divonne, cuhre)
     # Run the integrator a first time to compile the function.
@@ -120,9 +134,9 @@ end
 @info "Performance of MCIntegration:"
 for alg in (:vegas, :vegasmc)
     # Run the integrator a first time to compile the function.
-    integrate(test2; dof=[[3,] for i in 1:11], neval=1e4, solver=alg, print=-1)
+    integrate(test3; dof=[[3,] for i in 1:11], neval=1e4, solver=alg, print=-1, inplace=true)
     start_time = time_ns()
-    integrate(test2; dof=[[3,] for i in 1:11], neval=1e5, solver=alg, print=-2)
+    integrate(test3; dof=[[3,] for i in 1:11], neval=1e5, solver=alg, print=-2, inplace=true)
     # Cuba will run for 1e6 steps
     end_time = time_ns()
     println(@sprintf("%10.6f", Int(end_time - start_time) / 1e9),
@@ -146,7 +160,7 @@ end
 
 cd(@__DIR__) do
     if mtime("benchmark.c") > mtime("benchmark-c")
-        run(`gcc -O3 -I $(Cuba.Cuba_jll.artifact_dir)/include -o benchmark-c benchmark.c $(Cuba.Cuba_jll.libcuba_path) -lm`)
+        run(`gcc -O1 -I $(Cuba.Cuba_jll.artifact_dir)/include -o benchmark-c benchmark.c $(Cuba.Cuba_jll.libcuba_path) -lm`)
     end
     @info "Performance of Cuba Library in C:"
     withenv(Cuba.Cuba_jll.JLLWrappers.LIBPATH_env => Cuba.Cuba_jll.LIBPATH[]) do
@@ -155,7 +169,7 @@ end
 
     if success(`which gfortran`)
         if mtime("benchmark.f") > mtime("benchmark-fortran")
-            run(`gfortran -O3 -fcheck=no-bounds -cpp -o benchmark-fortran benchmark.f $(Cuba.Cuba_jll.libcuba_path) -lm`)
+            run(`gfortran -O1 -fcheck=no-bounds -cpp -o benchmark-fortran benchmark.f $(Cuba.Cuba_jll.libcuba_path) -lm`)
         end
         @info "Performance of Cuba Library in Fortran:"
         withenv(Cuba.Cuba_jll.JLLWrappers.LIBPATH_env => Cuba.Cuba_jll.LIBPATH[]) do
diff --git a/src/distribution/variable.jl b/src/distribution/variable.jl
index dff7680..315961b 100644
--- a/src/distribution/variable.jl
+++ b/src/distribution/variable.jl
@@ -639,6 +639,22 @@ function padding_probability(config, idx)
     end
     return prob
 end
+function padding_probability!(config, probs::AbstractVector)
+    for i in eachindex(probs)
+        probs[i] = 1.0
+        dof = config.dof[i]
+        for (vi, var) in enumerate(config.var)
+            offset = var.offset
+            for pos = dof[vi]+1:config.maxdof[vi]
+                probs[i] *= var.prob[pos+offset]
+            end
+        end
+        if probs[i] < TINY
+            @warn "probability is either too small or negative: $(probs[i])"
+        end
+        # @assert probs[i] ≈ padding_probability(config, i) "$(probs[i]) vs $(padding_probability(config, i))"
+    end
+end
 
 function delta_probability(config, curr=config.curr; new)
     prob = 1.0
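Note: the new `padding_probability!` mirrors the existing `padding_probability(config, idx)` but fills a caller-supplied buffer, so the sampling loop can reuse one vector instead of recomputing per index. A minimal usage sketch, assuming a preallocated buffer (the name `probs` is illustrative, not part of the API):

    # allocate one slot per integrand once, outside the Monte Carlo loop ...
    probs = ones(length(config.dof))
    # ... then refresh every padding probability in place at each step
    Dist.padding_probability!(config, probs)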
diff --git a/src/vegas/montecarlo.jl b/src/vegas/montecarlo.jl
index 9bfeea2..506942b 100644
--- a/src/vegas/montecarlo.jl
+++ b/src/vegas/montecarlo.jl
@@ -78,6 +78,8 @@ function montecarlo(config::Configuration{Ni,V,P,O,T}, integrand::Function, neva
 
     relativeWeights = zeros(T, Ni)
     weights = zeros(T, Ni)
+    padding_probability = ones(Ni)
+    diff = [config.dof[i] == config.maxdof for i in 1:Ni] # true if the dof of integrand i equals maxdof; in that case its padding probability never needs updating
 
     ################## test integrand type stability ######################
     if debug
@@ -127,6 +129,12 @@ function montecarlo(config::Configuration{Ni,V,P,O,T}, integrand::Function, neva
                 # jac *= Dist.create!(var, idx + var.offset, config)
             end
         end
+        # Dist.padding_probability!(config, padding_probability)
+        for i in 1:Ni
+            if diff[i] == false
+                padding_probability[i] = Dist.padding_probability(config, i)
+            end
+        end
         # weights = @_expanded_integrand(config, integrand, 1) # very fast, but requires explicit N
         # weights = integrand_wrap(config, integrand) #make a lot of allocations
         if inplace
@@ -140,15 +148,18 @@ function montecarlo(config::Configuration{Ni,V,P,O,T}, integrand::Function, neva
             @assert typeof(weights) <: Tuple || typeof(weights) <: AbstractVector "the integrand should return a vector with $(Ni) elements, but it returns a vector elements! $(typeof(weights)))"
         end
 
+        # println("before: ", weights, "with jac = ", jac)
+
        if (ne % measurefreq == 0)
            if isnothing(measure)
+                # println("after: ", weights * jac)
                for i in 1:Ni
-                    config.observable[i] += weights[i] * jac
+                    config.observable[i] += weights[i] * padding_probability[i] * jac
                end
                # observable += weights * jac
            else
                for i in 1:Ni
-                    relativeWeights[i] = weights[i] * jac
+                    relativeWeights[i] = weights[i] * padding_probability[i] * jac
                end
                (fieldcount(V) == 1) ?
                measure(config.var[1], config.observable, relativeWeights, config) :
@@ -166,6 +177,7 @@ function montecarlo(config::Configuration{Ni,V,P,O,T}, integrand::Function, neva
        for idx in eachindex(weights)
            w2 = abs(weights[idx])
            j2 = jac
+            # ! warning: need to check whether to use jac or jac*padding_probability[idx]
            if debug && (isfinite(w2) == false)
                @warn("abs of the integrand $idx = $(w2) is not finite at step $(config.neval)")
            end
diff --git a/src/vegas_mc/updates.jl b/src/vegas_mc/updates.jl
index 3d46656..b1eb15c 100644
--- a/src/vegas_mc/updates.jl
+++ b/src/vegas_mc/updates.jl
@@ -79,6 +79,7 @@ function changeVariable(config::Configuration{N,V,P,O,T}, integrand, inplace,
     for i in 1:N+1
         _padding_probability[i] = Dist.padding_probability(config, i)
     end
+    # Dist.padding_probability!(config, _padding_probability)
     # println(_padding_probability)
     newProbability = config.reweight[config.norm] * _padding_probability[config.norm] #normalization integral
     for i in 1:N #other integrals
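For context, both the benchmark above and the new TestHyperSphere test below use the in-place integrand interface: with `inplace=true`, the user function receives a preallocated weight vector and writes into it instead of returning a tuple. A minimal sketch of that calling convention, with an illustrative integrand (not taken from the package):

    using MCIntegration

    # In-place integrand: x holds the sampled variables, f the output weights
    # (one slot per integrand), and c is the Configuration.
    function half!(x, f, c)
        f[1] = x[1] < 0.5 ? 1.0 : 0.0  # integrates to 1/2 on [0, 1]
    end

    res = integrate(half!; var=Continuous(0.0, 1.0), dof=[[1,],],
        neval=1e5, solver=:vegasmc, inplace=true)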
diff --git a/test/montecarlo.jl b/test/montecarlo.jl
index 26fff19..ed44632 100644
--- a/test/montecarlo.jl
+++ b/test/montecarlo.jl
@@ -197,6 +197,24 @@ function TestComplex2_inplace(totalstep, alg)
     return res
 end
 
+function TestHyperSphere(totalstep, alg, N)
+    function volume_inverse(d)
+        euler = 2.71828182845904523536028747135266249775724709369995957496696763
+        return (d / (2π * euler))^(d / 2) * sqrt(d) * sqrt(π)
+    end
+
+    function f(x, w, c)
+        _w = x[1]^2
+        for i = 1:c.userdata
+            _w += x[i+1]^2
+            w[i] = _w < 1.0 ? volume_inverse(i + 1) : 0.0
+        end
+    end
+
+    res = integrate(f; var=Continuous(-1, 1), dof=[[i + 1,] for i in 1:N], userdata=N, neval=totalstep, print=-1, solver=alg, debug=false, inplace=true)
+    return res
+end
+
 # struct Weight <: AbstractVector
 #     d::Tuple{Float64,Float64}
 #     function Weight(a, b)
@@ -311,6 +329,9 @@ end
     println("inplace Complex2")
     check_complex(TestComplex2_inplace(neval, :vegas), [0.5, 1.0 / 3 * 1im])
 
+    println("hypersphere")
+    check(TestHyperSphere(neval, :vegas, 3), [0.9230, 0.94724, 0.96118])
+
     # println("vector type")
     # check_vector(Test_user_type(neval, :vegas), [0.5, 1.0 / 3])
 end
@@ -358,6 +379,9 @@ end
     println("inplace Complex2")
     check_complex(TestComplex2_inplace(neval, :vegasmc), [0.5, 1.0 / 3 * 1im])
 
+    println("hypersphere")
+    check(TestHyperSphere(neval, :vegasmc, 3), [0.9230, 0.94724, 0.96118])
+
     # println("vector type")
     # check_vector(Test_user_type(neval, :vegamcs), [0.5, 1.0 / 3])
 end
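Side note on the hypersphere check: `volume_inverse(d)` is a Stirling-type approximation to the reciprocal volume of the unit d-ball, so each component of the test integral converges to the approximation-to-exact ratio, a few percent below 1, rather than to exactly 1. A quick sketch for computing those ratios, assuming the SpecialFunctions package for `gamma` (helper names are illustrative):

    using SpecialFunctions: gamma

    # exact unit d-ball volume and the Stirling-type reciprocal used in the test
    ball_volume(d) = π^(d / 2) / gamma(d / 2 + 1)
    stirling_inverse(d) = (d / (2π * ℯ))^(d / 2) * sqrt(d) * sqrt(π)

    # limiting ratios for d = 2, 3, 4, one per component of the N = 3 test
    [ball_volume(d) * stirling_inverse(d) for d in 2:4]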