Merge pull request #1 from pasqal-io/antons_changes
Antons changes
a-quelle authored Nov 25, 2022
2 parents cc84451 + 513dcb6 commit 3e03a70
Showing 3 changed files with 805 additions and 120 deletions.
96 changes: 96 additions & 0 deletions gpu_demo.jl
@@ -0,0 +1,96 @@
#=
To run this script:
- install Julia 1.8
- check out https://github.com/ITensor/ITensors.jl.git at commit 6f6831e20ef5badc5ac82350e7524c968c876b4f
- julia -e 'using Pkg; Pkg.develop(path="<ITENSOR_ROOT>")'
- julia -e 'using Pkg; Pkg.develop(path="<ITENSOR_ROOT>/ITensorGPU")'
=#
using ITensors
using CUDA
using CUDA.CUTENSOR

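# Number of sites in the MPS.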
N = 100

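# Assemble the factors of an MPS as ITensors over the given site and link indices.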
function state_factors(raw_states, sites, links)
    state_factors::Vector{ITensor} = [ITensor(raw_states[1], sites[1], links[1])]
    for i in 2:N-1
        factor = ITensor(raw_states[i], links[i-1], sites[i], links[i])
        push!(state_factors, factor)
    end
    push!(state_factors, ITensor(raw_states[N], links[N-1], sites[N]))
    return state_factors
end

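# One single-qubit rotation per site, as a rank-2 ITensor mapping s to s'.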
function rotation_layer(raw_rotations, sites)
    ops = ITensor[]
    for (i, s) in enumerate(sites)
        push!(ops, ITensor(raw_rotations[i], s', s))
    end
    return ops
end

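# GPU analogue of state_factors: the same raw arrays uploaded with cu() and
# wrapped as CuTensors.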
function cu_state_factors(raw_states, cusites, culinks)
    cu_state_factors::Vector{CuTensor} = [CuTensor(ComplexF64.(cu(raw_states[1])), [cusites[1], culinks[1]])]
    for i in 2:N-1
        factor = CuTensor(ComplexF64.(cu(raw_states[i])), [culinks[i-1], cusites[i], culinks[i]])
        push!(cu_state_factors, factor)
    end
    push!(cu_state_factors, CuTensor(ComplexF64.(cu(raw_states[N])), [culinks[N-1], cusites[N]]))
    return cu_state_factors
end

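# GPU analogue of rotation_layer.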
function cu_rotation_layer(raw_rotations, sites, sitesprime)
    ops = CuTensor[]
    for i in 1:N
        push!(ops, CuTensor(ComplexF64.(cu(raw_rotations[i])), [sitesprime[i], sites[i]]))
    end
    return ops
end

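# Apply a layer of single-qubit rotations to a random MPS of the given bond
# dimension, once with ITensors on the CPU and once with CUTENSOR on the GPU,
# and report both wall times.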
function time_cpu_and_cuda(bond_dim)
    angles = [pi/2 for _ in 1:3N]

    Ry(theta) = cos(theta/2)*[1 0; 0 1] + im*sin(theta/2)*[0 -im; im 0]
    Rx(theta) = cos(theta/2)*[1 0; 0 1] + im*sin(theta/2)*[0 1; 1 0]
    rotations(t1::Number, t2::Number, t3::Number) = Ry(t3)*Rx(t2)*Ry(t1)

    raw_states::Vector{AbstractArray} = [rand(2, bond_dim) + im*rand(2, bond_dim)]
    for i in 2:N-1
        push!(raw_states, rand(bond_dim, 2, bond_dim) + im*rand(bond_dim, 2, bond_dim))
    end
    push!(raw_states, rand(bond_dim, 2) + im*rand(bond_dim, 2))

    raw_rotations = []
    for i in 1:N
        push!(raw_rotations, rotations(angles[1+3*(i-1)], angles[2+3*(i-1)], angles[3+3*(i-1)]))
    end

    sites = [Index(2) for _ in 1:N]
    links = [Index(bond_dim) for _ in 1:N-1]

    in_factors = state_factors(raw_states, sites, links)
    layer = rotation_layer(raw_rotations, sites)
    t1 = @timed contract.(in_factors, layer)

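    # CUTENSOR labels tensor modes with plain Chars instead of ITensor Index objects.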
    cusites = [Char(i) for i in 1:N]
    cusitesprime = [Char(i) for i in N+1:2N]
    culinks = [Char(i) for i in 2N+1:3N]

    cu_in_factors = cu_state_factors(raw_states, cusites, culinks)
    cu_layer = cu_rotation_layer(raw_rotations, cusites, cusitesprime)
    t2 = CUDA.@elapsed cu_in_factors .* cu_layer

    println("For bond dim $bond_dim, CPU took $(t1[:time])")
    println("For bond dim $bond_dim, GPU took $t2")
    println("-----------------")
    return (CPU=t1[:time], GPU=t2)
end

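# Sweep over increasing bond dimensions; the first call also pays Julia's JIT compilation cost.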
time_cpu_and_cuda(10)
time_cpu_and_cuda(20)
time_cpu_and_cuda(40)
time_cpu_and_cuda(80)
time_cpu_and_cuda(160)
time_cpu_and_cuda(320)
time_cpu_and_cuda(640)
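
A possible follow-up, not part of the committed file: a minimal sketch that tabulates the NamedTuples returned by time_cpu_and_cuda, assuming the definitions above have been loaded. The results variable and the speedup column are illustrative additions.

```julia
# Hypothetical usage sketch: tabulate CPU vs GPU timings per bond dimension.
results = [(bond_dim = d, time_cpu_and_cuda(d)...) for d in (10, 20, 40, 80, 160, 320, 640)]

println("bond_dim  CPU (s)      GPU (s)      speedup")
for r in results
    println(rpad(r.bond_dim, 10),
            rpad(round(r.CPU; digits = 4), 13),
            rpad(round(r.GPU; digits = 4), 13),
            round(r.CPU / r.GPU; digits = 2))
end
```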
128 changes: 8 additions & 120 deletions tensor_networks.ipynb
@@ -122,18 +122,6 @@
"hint: there is an inbuilt kron function in the language"
]
},
{
"cell_type": "markdown",
"id": "0ae71ba7",
"metadata": {},
"source": [
"solution:\n",
"\n",
"function tensor(a,b)\n",
" return kron(a,b)\n",
"end"
]
},
{
"cell_type": "markdown",
"id": "ed16c698",
@@ -233,22 +221,6 @@
"hint: recursively call tensor, for example by using foldl(tensor, .)"
]
},
{
"cell_type": "markdown",
"id": "8da9b908",
"metadata": {},
"source": [
"solution:\n",
"\n",
"function ghz_state(n)\n",
" return foldl(tensor, [[1,0] for _ in 1:n]) + foldl(tensor, [[0,1] for _ in 1:n])\n",
"end\n",
"\n",
"function hadamard_state(n)\n",
" return foldl(tensor, [[1,1] for _ in 1:n])\n",
"end"
]
},
{
"cell_type": "markdown",
"id": "6d698325",
@@ -428,7 +400,8 @@
" @assert elt(ψ, [1,2,1,2,2]) ≈ 1\n",
"\n",
" return nothing\n",
"end"
"end\n",
"test_tn_states()"
]
},
{
@@ -440,31 +413,6 @@
" https://itensor.github.io/ITensors.jl/stable/examples/MPSandMPO.html"
]
},
{
"cell_type": "markdown",
"id": "4fcdd292",
"metadata": {},
"source": [
"solution:\n",
"\n",
"function tn_ghz_state(n)\n",
" sites = siteinds(\"S=1/2\", n)\n",
" states_up = [\"Up\" for _ in 1:n]\n",
" states_dn = [\"Dn\" for _ in 1:n]\n",
" return MPS(sites, states_dn) + MPS(sites, states_up)\n",
"end\n",
"\n",
"function tn_hadamard_state(n)\n",
" sites = siteinds(\"S=1/2\", n)\n",
" state = randomMPS(sites) #this constructs a randomly initialized product wave-function in TN form\n",
" for s in 1:n #manually set each qubit to the [1, 1] state\n",
" state[s][1] = 1\n",
" state[s][2] = 1\n",
" end\n",
" return state\n",
"end"
]
},
{
"cell_type": "markdown",
"id": "d9665529",
@@ -601,12 +549,15 @@
" return scalar(V)\n",
" end\n",
" sites = siteinds(\"S=1/2\", N)\n",
" state = MPS(sites, [\"Up\"])\n",
" in = MPS(sites, [\"Dn\", \"Up\", \"Dn\"])\n",
" hwa = hwa_layer(sites, [pi/2, pi/2, -pi/2, pi/2, pi/2, -pi/2, pi/2, pi/2, -pi/2])\n",
" @assert elt(out,[2,2,1]) ≈ sqrt(2)+im*sqrt(2)\n",
" out = apply(hwa, in)\n",
" @assert elt(out,[2,2,1]) ≈ (sqrt(2)+im*sqrt(2))/2\n",
" \n",
" return nothing\n",
"end"
"end\n",
"\n",
"test_hwa_layer()"
]
},
{
@@ -618,27 +569,6 @@
"hint2: In the above file look at layer, variational_circuit, and lines 77-79"
]
},
{
"cell_type": "markdown",
"id": "7f9f4508",
"metadata": {},
"source": [
"Solution:\n",
"Ry(theta) = cos(theta/2)*[1 0; 0 1] + im*sin(theta/2)*[0 -im; im 0]\n",
"Rx(theta) = cos(theta/2)*[1 0; 0 1] + im*sin(theta/2)*[0 1; 1 0]\n",
"ITensors.op(::OpName\"rotations\", ::SiteType\"Qubit\"; t1::Number, t2::Number, t3::Number) = Ry(t3)*Rx(t2)*Ry(t1)\n",
"\n",
"function hwa_layer(sites, angles::Vector{<:Number})\n",
" layer = Prod{Op}()\n",
" for i in 1:length(sites)\n",
" layer = Op(\"rotations\", i; t1=angles[1+3*(i-1)], t2=angles[2+3*(i-1)], t3=angles[3+3*(i-1)])*layer\n",
" end\n",
" layer = Op(\"CNOT\", 1, 2)*layer\n",
" layer = Op(\"CNOT\", 2, 3)*layer\n",
" return Prod{ITensor}(layer, sites)\n",
"end"
]
},
{
"cell_type": "markdown",
"id": "003a4139",
Expand Down Expand Up @@ -704,24 +634,6 @@
"call orthogonalize or orthogonalize!"
]
},
{
"cell_type": "markdown",
"id": "0f40edd1",
"metadata": {},
"source": [
"solution:\n",
"\n",
"sites = siteinds(2, 5)\n",
"ψ = randomMPS(sites; linkdims = 5)\n",
"print(ψ)\n",
"orthogonalize!(ψ, 3)\n",
"print(ψ)\n",
"a = matrix(ψ[1], inds(ψ[1]))\n",
"print(a)\n",
"b = array(ψ[2], inds(ψ[2]))\n",
"print(b)"
]
},
{
"cell_type": "markdown",
"id": "e4dd25b9",
Expand Down Expand Up @@ -784,30 +696,6 @@
"you can copy any tensor, and then prime some of its indexes. Contracting the tensor with its copy will contract along the unprimed indices. https://itensor.github.io/ITensors.jl/stable/ITensorType.html#Priming_and_tagging_ITensor"
]
},
{
"cell_type": "markdown",
"id": "bbd01772",
"metadata": {},
"source": [
"solution:\n",
"\n",
"sites = siteinds(2, 5)\n",
"ψ = randomMPS(sites; linkdims = 5)\n",
"orthogonalize!(ψ, 3)\n",
"println(ψ)\n",
"a = ψ[1]\n",
"b = deepcopy(a)\n",
"prime!(b; tags=\"l=1\")\n",
"c = matrix(a*b)\n",
"println(c)\n",
"d = ψ[2]\n",
"e = deepcopy(d)\n",
"prime!(e; tags=\"l=2\")\n",
"f = matrix(d*e)\n",
"println(f)\n",
"#etc."
]
},
{
"cell_type": "markdown",
"id": "d4edeb9e",