Commit

[NDTensors] [ITensors] Excise unneeded submodules (#1601)
mtfishman authored Feb 4, 2025
1 parent a61a67b commit ce09804
Showing 427 changed files with 117 additions and 20,192 deletions.
4 changes: 3 additions & 1 deletion NDTensors/Project.toml
@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman <[email protected]>"]
-version = "0.3.74"
+version = "0.4.0"

 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -29,6 +29,7 @@
 Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
 StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143"
 TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
 TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
+TypeParameterAccessors = "7e5a90cf-f82e-492e-a09b-e3e26432c138"
 VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8"

 [weakdeps]
@@ -89,6 +90,7 @@
 StridedViews = "0.2.2, 0.3"
 TBLIS = "0.2"
 TimerOutputs = "0.5.5"
 TupleTools = "1.2.0"
+TypeParameterAccessors = "0.2"
 VectorInterface = "0.4.2, 0.5"
 cuTENSOR = "2"
 julia = "1.10"
10 changes: 4 additions & 6 deletions NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl
@@ -2,16 +2,14 @@
 using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype
 using NDTensors.AMDGPUExtensions: AMDGPUExtensions, ROCArrayAdaptor
 using NDTensors.GPUArraysCoreExtensions: storagemode
 using NDTensors.TypeParameterAccessors:
-  default_type_parameter,
-  set_type_parameter,
-  set_type_parameters,
-  type_parameter,
-  type_parameters
+  default_type_parameters, set_type_parameters, type_parameters
 using Adapt: Adapt, adapt
 using AMDGPU: AMDGPU, ROCArray, ROCVector
 using Functors: fmap

-function AMDGPUExtensions.roc(xs; storagemode=default_type_parameter(ROCArray, storagemode))
+function AMDGPUExtensions.roc(
+  xs; storagemode=default_type_parameters(ROCArray, storagemode)
+)
   return fmap(x -> adapt(ROCArrayAdaptor{storagemode}(), x), xs)
 end

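For context on this rename and the matching ones in the files below: the plural TypeParameterAccessors functions accept either a single parameter position or a tuple of positions, as the calls in these diffs show. A minimal editorial sketch of the calling pattern, assuming TypeParameterAccessors defines eltype and ndims positions for Array (the results in the comments are assumptions for illustration, not output from this commit):

# Sketch only: the singular -> plural TypeParameterAccessors rename pattern.
using TypeParameterAccessors: default_type_parameters, set_type_parameters, type_parameters

type_parameters(Array{Float64,2}, (eltype, ndims))     # assumed: (Float64, 2)
set_type_parameters(Array{Float64,2}, eltype, Float32) # assumed: Matrix{Float32}
default_type_parameters(Array, eltype)                 # assumed: Float64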
4 changes: 2 additions & 2 deletions NDTensors/ext/NDTensorsCUDAExt/adapt.jl
@@ -5,9 +5,9 @@
 using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype
 using NDTensors.CUDAExtensions: CUDAExtensions, CuArrayAdaptor
 using NDTensors.GPUArraysCoreExtensions: storagemode
 using NDTensors.TypeParameterAccessors:
-  default_type_parameter, set_type_parameters, type_parameters
+  default_type_parameters, set_type_parameters, type_parameters

-function CUDAExtensions.cu(xs; storagemode=default_type_parameter(CuArray, storagemode))
+function CUDAExtensions.cu(xs; storagemode=default_type_parameters(CuArray, storagemode))
   return fmap(x -> adapt(CuArrayAdaptor{storagemode}(), x), xs)
 end
5 changes: 2 additions & 3 deletions NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl
@@ -1,6 +1,5 @@
 using GPUArraysCore: @allowscalar, AbstractGPUArray
-using NDTensors: NDTensors, BlockSparseTensor, dense, diag, map_diag!
-using NDTensors.DiagonalArrays: diaglength
+using NDTensors: NDTensors, BlockSparseTensor, dense, diag, diaglength, map_diag!
 using NDTensors.Expose: Exposed, unexpose

 ## TODO to circumvent issues with blocksparse and scalar indexing
@@ -11,7 +10,7 @@
 function NDTensors.diag(ETensor::Exposed{<:AbstractGPUArray,<:BlockSparseTensor})
   return diag(dense(unexpose(ETensor)))
 end

-## TODO scalar indexing is slow here
+## TODO scalar indexing is slow here
 function NDTensors.map_diag!(
   f::Function,
   exposed_t_destination::Exposed{<:AbstractGPUArray,<:BlockSparseTensor},
10 changes: 1 addition & 9 deletions NDTensors/src/NDTensors.jl
@@ -17,6 +17,7 @@
 include("abstractarray/similar.jl")
 include("abstractarray/mul.jl")
 include("abstractarray/permutedims.jl")
 include("abstractarray/generic_array_constructors.jl")
+include("abstractarray/diaginterface.jl")
 include("array/permutedims.jl")
 include("array/mul.jl")
 include("tupletools.jl")
@@ -91,15 +92,6 @@
 include("empty/adapt.jl")
 #
 include("deprecated.jl")

-#####################################
-# NDTensorsNamedDimsArraysExt
-# I tried putting this inside of an
-# `NDTensorsNamedDimsArraysExt` module
-# but for some reason it kept overloading
-# `Base.similar` instead of `NDTensors.similar`.
-#
-include("NDTensorsNamedDimsArraysExt/NDTensorsNamedDimsArraysExt.jl")
-
 #####################################
 # A global timer used with TimerOutputs.jl
 #

This file was deleted.

1 change: 0 additions & 1 deletion NDTensors/src/NDTensorsNamedDimsArraysExt/fill.jl

This file was deleted.

5 changes: 0 additions & 5 deletions NDTensors/src/NDTensorsNamedDimsArraysExt/similar.jl

This file was deleted.

29 changes: 29 additions & 0 deletions NDTensors/src/abstractarray/diaginterface.jl
@@ -0,0 +1,29 @@
+# Selected interface functions from https://github.com/ITensor/DiagonalArrays.jl,
+# copied here so we don't have to depend on `DiagonalArrays.jl`.
+
+function diaglength(a::AbstractArray)
+  return minimum(size(a))
+end
+
+function diagstride(a::AbstractArray)
+  s = 1
+  p = 1
+  for i in 1:(ndims(a) - 1)
+    p *= size(a, i)
+    s += p
+  end
+  return s
+end
+
+function diagindices(a::AbstractArray)
+  maxdiag = LinearIndices(a)[CartesianIndex(ntuple(Returns(diaglength(a)), ndims(a)))]
+  return 1:diagstride(a):maxdiag
+end
+
+function diagindices(a::AbstractArray{<:Any,0})
+  return Base.OneTo(1)
+end
+
+function diagview(a::AbstractArray)
+  return @view a[diagindices(a)]
+end
13 changes: 8 additions & 5 deletions NDTensors/src/abstractarray/generic_array_constructors.jl
@@ -1,5 +1,8 @@
-using .TypeParameterAccessors:
-  unwrap_array_type, specify_default_type_parameters, type_parameter
+using TypeParameterAccessors:
+  unwrap_array_type,
+  specify_default_type_parameters,
+  specify_type_parameters,
+  type_parameters

 # Convert to Array, avoiding copying if possible
 array(a::AbstractArray) = a
@@ -8,9 +11,9 @@
 vector(a::AbstractVector) = a

 ## Warning: to use these functions it is necessary to define `TypeParameterAccessors.position(::Type{<:YourArrayType}, ::typeof(ndims))`
 # Implementation, catches if `ndims(arraytype) != length(dims)`.
-## TODO convert ndims to `type_parameter(::, typeof(ndims))`
+## TODO convert ndims to `type_parameters(::, typeof(ndims))`
 function generic_randn(arraytype::Type{<:AbstractArray}, dims...; rng=Random.default_rng())
-  arraytype_specified = specify_type_parameter(
+  arraytype_specified = specify_type_parameters(
     unwrap_array_type(arraytype), ndims, length(dims)
   )
   arraytype_specified = specify_default_type_parameters(arraytype_specified)
@@ -27,7 +30,7 @@
 end

 # Implementation, catches if `ndims(arraytype) != length(dims)`.
 function generic_zeros(arraytype::Type{<:AbstractArray}, dims...)
-  arraytype_specified = specify_type_parameter(
+  arraytype_specified = specify_type_parameters(
     unwrap_array_type(arraytype), ndims, length(dims)
   )
   arraytype_specified = specify_default_type_parameters(arraytype_specified)
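A hypothetical usage sketch of these internal constructors (the tail of each implementation is collapsed above, so the returns in the comments are assumptions):

# Sketch only: assumed behavior of generic_randn/generic_zeros.
using NDTensors: generic_randn, generic_zeros
using Random: Xoshiro

# ndims is specified from the number of dimension arguments, then any
# remaining unset type parameters are filled in with defaults.
x = generic_randn(Vector{Float64}, 4)        # assumed: 4-element Vector{Float64}
z = generic_zeros(Array{Float32}, 2, 3)      # assumed: 2×3 Matrix{Float32} of zeros
y = generic_randn(Vector, 4; rng=Xoshiro(0)) # assumed: eltype falls back to the default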
2 changes: 1 addition & 1 deletion NDTensors/src/abstractarray/iscu.jl
@@ -1,4 +1,4 @@
-using .TypeParameterAccessors: unwrap_array_type
+using TypeParameterAccessors: unwrap_array_type
 # TODO: Make `isgpu`, `ismtl`, etc.
 # For `isgpu`, will require a `NDTensorsGPUArrayCoreExt`.
 iscu(A::AbstractArray) = iscu(typeof(A))
2 changes: 1 addition & 1 deletion NDTensors/src/abstractarray/set_types.jl
@@ -1,4 +1,4 @@
-using .TypeParameterAccessors: TypeParameterAccessors
+using TypeParameterAccessors: TypeParameterAccessors

 """
 # Do we still want to define things like this?
2 changes: 1 addition & 1 deletion NDTensors/src/abstractarray/similar.jl
@@ -1,5 +1,5 @@
 using Base: DimOrInd, Dims, OneTo
-using .TypeParameterAccessors: IsWrappedArray, unwrap_array_type, set_eltype, similartype
+using TypeParameterAccessors: IsWrappedArray, unwrap_array_type, set_eltype, similartype

 ## Custom `NDTensors.similar` implementation.
 ## More extensive than `Base.similar`.
6 changes: 3 additions & 3 deletions NDTensors/src/adapt.jl
@@ -27,11 +27,11 @@
 double_precision(x) = fmap(x -> adapt(double_precision(eltype(x)), x), x)

 # Used to adapt `EmptyStorage` types
 #

-using .TypeParameterAccessors: specify_type_parameter, specify_type_parameters
+using TypeParameterAccessors: specify_type_parameters
 function adapt_storagetype(to::Type{<:AbstractVector}, x::Type{<:TensorStorage})
-  return set_datatype(x, specify_type_parameter(to, eltype, eltype(x)))
+  return set_datatype(x, specify_type_parameters(to, eltype, eltype(x)))
 end

 function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:TensorStorage})
-  return set_datatype(x, specify_type_parameter(to, (ndims, eltype), (1, eltype(x))))
+  return set_datatype(x, specify_type_parameters(to, (ndims, eltype), (1, eltype(x))))
 end
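A sketch of what this adaptation does for a concrete storage type (hypothetical: Dense's parameter layout and set_datatype's eltype handling are assumed from context, not shown in this diff):

# Sketch only: assumed behavior of adapt_storagetype above.
using NDTensors: NDTensors, Dense

# specify_type_parameters fills the target's eltype from the existing
# storage, so an eltype-free target vector type still carries it through:
NDTensors.adapt_storagetype(Vector, Dense{Float64,Vector{Float64}})
# assumed: Dense{Float64, Vector{Float64}}

# A concrete target eltype is already specified, so it is kept:
NDTensors.adapt_storagetype(Vector{Float32}, Dense{Float64,Vector{Float64}})
# assumed: Dense{Float32, Vector{Float32}}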

This file was deleted.

This file was deleted.

61 changes: 0 additions & 61 deletions NDTensors/src/backup/arraystorage/arraystorage/storage/contract.jl

This file was deleted.

This file was deleted.

This file was deleted.


6 comments on commit ce09804

@mtfishman
Member Author


@JuliaRegistrator register subdir=NDTensors

@JuliaRegistrator


Registration pull request created: JuliaRegistries/General/124280

Tip: Release Notes

Did you know you can add release notes too? Just add markdown formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR, and if TagBot is installed it will also be added to the
release that TagBot creates. i.e.

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here just re-invoke and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a NDTensors-v0.4.0 -m "<description of version>" ce09804cbf63712ea6740aa976b1d30909fae07f
git push origin NDTensors-v0.4.0

@mtfishman
Member Author


@JuliaRegistrator register

Release notes:

Breaking changes

  • This release should not be breaking for the average user. It removes internal submodules that held experimental code for rewriting the internals of NDTensors.jl/ITensors.jl. It is marked as breaking since ITensorMPS.jl was making use of some of that experimental code.

@JuliaRegistrator


Registration pull request created: JuliaRegistries/General/124284

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a v0.8.0 -m "<description of version>" ce09804cbf63712ea6740aa976b1d30909fae07f
git push origin v0.8.0

@mtfishman
Member Author


@JuliaRegistrator register subdir=NDTensors

Release notes:

Breaking changes

  • This release should not be breaking for the average user. It removes internal submodules that held experimental code for rewriting the internals of NDTensors.jl/ITensors.jl. It is marked as breaking since ITensorMPS.jl was making use of some of that experimental code.

@JuliaRegistrator


Registration pull request updated: JuliaRegistries/General/124280

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a NDTensors-v0.4.0 -m "<description of version>" ce09804cbf63712ea6740aa976b1d30909fae07f
git push origin NDTensors-v0.4.0
