Merge branch 'branch-24.02' into ancestors_and_descendants
eriknw committed Dec 19, 2023
2 parents: a19ed3d + d7c88d1; commit: 4fc915e
Showing 73 changed files with 1,350 additions and 1,157 deletions.
61 changes: 13 additions & 48 deletions build.sh
@@ -69,7 +69,7 @@ HELP="$0 [<target> ...] [<flag> ...]
-v - verbose build mode
-g - build for debug
-n - do not install after a successful build (does not affect Python packages)
--pydevelop - use setup.py develop instead of install
--pydevelop - install the Python packages in editable mode
--allgpuarch - build for all supported GPU architectures
--skip_cpp_tests - do not build the SG test binaries as part of the libcugraph and libcugraph_etl targets
--without_cugraphops - do not build algos that require cugraph-ops
@@ -187,14 +187,18 @@ if hasArg --cmake_default_generator; then
CMAKE_GENERATOR_OPTION=""
fi
if hasArg --pydevelop; then
PYTHON_ARGS_FOR_INSTALL="-m pip install --no-build-isolation --no-deps -e"
PYTHON_ARGS_FOR_INSTALL="${PYTHON_ARGS_FOR_INSTALL} -e"
fi
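
For context, the assembled command is an ordinary pip invocation either way. A minimal sketch, assuming the base value of PYTHON_ARGS_FOR_INSTALL set earlier in the script is the old inline string minus its -e flag:

    # default: regular install of the package directory
    python -m pip install --no-build-isolation --no-deps ./python/pylibcugraph

    # with --pydevelop: editable install, so edits to the Python sources
    # take effect without reinstalling
    python -m pip install --no-build-isolation --no-deps -e ./python/pylibcugraph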

# Append `-DFIND_RAFT_CPP=ON` to EXTRA_CMAKE_ARGS unless a user specified the option.
SKBUILD_EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS}"
if [[ "${EXTRA_CMAKE_ARGS}" != *"DFIND_CUGRAPH_CPP"* ]]; then
SKBUILD_EXTRA_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS} -DFIND_CUGRAPH_CPP=ON"
fi

# Replace spaces with semicolons in SKBUILD_EXTRA_CMAKE_ARGS
SKBUILD_EXTRA_CMAKE_ARGS=$(echo ${SKBUILD_EXTRA_CMAKE_ARGS} | sed 's/ /;/g')

# Append `-DFIND_CUGRAPH_CPP=ON` to EXTRA_CMAKE_ARGS unless a user specified the option.
if [[ "${EXTRA_CMAKE_ARGS}" != *"DFIND_CUGRAPH_CPP"* ]]; then
SKBUILD_EXTRA_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS};-DFIND_CUGRAPH_CPP=ON"
fi
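
The space-to-semicolon rewrite reflects the scikit-build-core convention of reading SKBUILD_CMAKE_ARGS as a semicolon-separated list, whereas the classic SKBUILD_CONFIGURE_OPTIONS replaced elsewhere in this diff was space-separated. A quick illustration, with hypothetical flag values:

    EXTRA_CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=OFF"  # hypothetical values
    SKBUILD_EXTRA_CMAKE_ARGS=$(echo ${EXTRA_CMAKE_ARGS} | sed 's/ /;/g')
    echo "${SKBUILD_EXTRA_CMAKE_ARGS}"   # -DCMAKE_BUILD_TYPE=Release;-DBUILD_TESTS=OFF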

# If clean or uninstall targets given, run them prior to any other steps
if hasArg uninstall; then
@@ -213,8 +217,7 @@ if hasArg uninstall; then
if [ -e ${LIBCUGRAPH_BUILD_DIR}/install_manifest.txt ]; then
xargs rm -f < ${LIBCUGRAPH_BUILD_DIR}/install_manifest.txt > /dev/null 2>&1
fi
# uninstall cugraph and pylibcugraph installed from a prior "setup.py
# install"
# uninstall cugraph and pylibcugraph installed from a prior install
# FIXME: if multiple versions of these packages are installed, this only
# removes the latest one and leaves the others installed. build.sh uninstall
# can be run multiple times to remove all of them, but that is not obvious.
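
One way to make that less surprising, sketched here rather than taken from the repository, is to loop until pip no longer finds an installed copy:

    # hypothetical cleanup loop; `pip show` exits non-zero once nothing remains
    for pkg in cugraph pylibcugraph; do
        while pip show ${pkg} > /dev/null 2>&1; do
            pip uninstall -y ${pkg}
        done
    done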
@@ -226,10 +229,6 @@ if hasArg clean; then
# Ignore errors for clean since missing files, etc. are not failures
set +e
# remove artifacts generated inplace
# FIXME: ideally the "setup.py clean" command would be used for this, but
# currently running any setup.py command has side effects (eg. cloning
# repos).
# (cd ${REPODIR}/python && python setup.py clean)
if [[ -d ${REPODIR}/python ]]; then
cleanPythonDir ${REPODIR}/python
fi
@@ -317,24 +316,7 @@ if buildDefault || hasArg pylibcugraph || hasArg all; then
if hasArg --clean; then
cleanPythonDir ${REPODIR}/python/pylibcugraph
else
# FIXME: skbuild with setuptools>=64 has a bug when called from a "pip
# install -e" command, resulting in a broken editable wheel. Continue
# to use "setup.py bdist_ext --inplace" for a develop build until
# https://github.com/scikit-build/scikit-build/issues/981 is closed.
if hasArg --pydevelop; then
cd ${REPODIR}/python/pylibcugraph
python setup.py build_ext \
--inplace \
-- \
-DFIND_CUGRAPH_CPP=ON \
-DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS} \
-Dcugraph_ROOT=${LIBCUGRAPH_BUILD_DIR} \
-- \
-j${PARALLEL_LEVEL:-1}
cd -
fi
SKBUILD_CONFIGURE_OPTIONS="${SKBUILD_EXTRA_CMAKE_ARGS} -DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS}" \
SKBUILD_BUILD_OPTIONS="-j${PARALLEL_LEVEL}" \
SKBUILD_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS};-DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS}" \
python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/pylibcugraph
fi
fi
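
With the setup.py branch gone, the whole pylibcugraph step reduces to one scikit-build-core driven pip call. An illustrative expansion using the variables above, with ON standing in for ${BUILD_WITH_CUGRAPHOPS}:

    SKBUILD_CMAKE_ARGS="-DFIND_CUGRAPH_CPP=ON;-DUSE_CUGRAPH_OPS=ON" \
        python -m pip install --no-build-isolation --no-deps ./python/pylibcugraph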
@@ -344,24 +326,7 @@ if buildDefault || hasArg cugraph || hasArg all; then
if hasArg --clean; then
cleanPythonDir ${REPODIR}/python/cugraph
else
# FIXME: skbuild with setuptools>=64 has a bug when called from a "pip
# install -e" command, resulting in a broken editable wheel. Continue
# to use "setup.py bdist_ext --inplace" for a develop build until
# https://github.com/scikit-build/scikit-build/issues/981 is closed.
if hasArg --pydevelop; then
cd ${REPODIR}/python/cugraph
python setup.py build_ext \
--inplace \
-- \
-DFIND_CUGRAPH_CPP=ON \
-DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS} \
-Dcugraph_ROOT=${LIBCUGRAPH_BUILD_DIR} \
-- \
-j${PARALLEL_LEVEL:-1}
cd -
fi
SKBUILD_CONFIGURE_OPTIONS="${SKBUILD_EXTRA_CMAKE_ARGS} -DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS}" \
SKBUILD_BUILD_OPTIONS="-j${PARALLEL_LEVEL}" \
SKBUILD_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS};-DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS}" \
python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph
fi
fi
2 changes: 1 addition & 1 deletion ci/build_wheel_cugraph.sh
@@ -12,6 +12,6 @@ RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
RAPIDS_PY_WHEEL_NAME=pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX} rapids-download-wheels-from-s3 ./local-pylibcugraph
export PIP_FIND_LINKS=$(pwd)/local-pylibcugraph

export SKBUILD_CONFIGURE_OPTIONS="-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/"
export SKBUILD_CMAKE_ARGS="-DDETECT_CONDA_ENV=OFF;-DFIND_CUGRAPH_CPP=OFF;-DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/"

./ci/build_wheel.sh cugraph python/cugraph
2 changes: 1 addition & 1 deletion ci/build_wheel_pylibcugraph.sh
@@ -3,6 +3,6 @@

set -euo pipefail

export SKBUILD_CONFIGURE_OPTIONS="-DDETECT_CONDA_ENV=OFF -DCUGRAPH_BUILD_WHEELS=ON -DFIND_CUGRAPH_CPP=OFF -DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/"
export SKBUILD_CMAKE_ARGS="-DDETECT_CONDA_ENV=OFF;-DFIND_CUGRAPH_CPP=OFF;-DCPM_cugraph-ops_SOURCE=${GITHUB_WORKSPACE}/cugraph-ops/"

./ci/build_wheel.sh pylibcugraph python/pylibcugraph
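
Both wheel scripts get the same treatment: the classic scikit-build variable SKBUILD_CONFIGURE_OPTIONS becomes scikit-build-core's semicolon-separated SKBUILD_CMAKE_ARGS, and -DCUGRAPH_BUILD_WHEELS=ON is dropped. The exported variable is consumed by the shared ci/build_wheel.sh helper, which this diff does not show; a sketch of the kind of call it presumably makes, illustrative only:

    # not the actual helper; the real logic lives in ci/build_wheel.sh
    package_name=$1
    package_dir=$2
    python -m pip wheel "${package_dir}" --wheel-dir dist --no-deps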
7 changes: 0 additions & 7 deletions ci/notebook_list.py
@@ -42,7 +42,6 @@ def skip_book_dir(runtype):
# Not strictly true... however what we mean is
# Pascal or earlier
#
pascal = False
ampere = False
device = cuda.get_current_device()

@@ -62,8 +61,6 @@ def skip_book_dir(runtype):
cc = getattr(device, "COMPUTE_CAPABILITY", None) or getattr(
device, "compute_capability"
)
if cc[0] < 7:
pascal = True
if cc[0] >= 8:
ampere = True

@@ -91,10 +88,6 @@ def skip_book_dir(runtype):
)
skip = True
break
elif pascal and re.search("# Does not run on Pascal", line):
print(f"SKIPPING {filename} (does not run on Pascal)", file=sys.stderr)
skip = True
break
elif ampere and re.search("# Does not run on Ampere", line):
print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
skip = True
4 changes: 1 addition & 3 deletions ci/test.sh
@@ -63,9 +63,7 @@ fi
# EXITCODE for the script.
set +e

if (python ${CUGRAPH_ROOT}/ci/utils/is_pascal.py); then
echo "WARNING: skipping C++ tests on Pascal GPU arch."
elif hasArg "--run-cpp-tests"; then
if hasArg "--run-cpp-tests"; then
echo "C++ gtests for cuGraph (single-GPU only)..."
for gt in "${CONDA_PREFIX}/bin/gtests/libcugraph/"*_TEST; do
test_name=$(basename $gt)
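
With the Pascal guard removed, the C++ suite is gated only by the flag. A usage sketch; the flag name comes from the hunk above, the rest of the invocation is assumed:

    ./ci/test.sh --run-cpp-tests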
37 changes: 0 additions & 37 deletions ci/utils/is_pascal.py

This file was deleted.
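
The helper became dead code once Pascal (compute capability 6.x) support was dropped; the supported_archs change in cpp/CMakeLists.txt below makes the same cut on the C++ side. If a similar guard were ever needed again, one shell-level alternative, assuming a driver new enough to support the compute_cap query:

    # prints one value per GPU, e.g. "8.0"
    nvidia-smi --query-gpu=compute_cap --format=csv,noheader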

2 changes: 1 addition & 1 deletion conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -63,7 +63,7 @@ dependencies:
- recommonmark
- requests
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-build-core>=0.7.0
- scikit-learn>=0.23.1
- scipy
- setuptools>=61.0.0
2 changes: 1 addition & 1 deletion conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -62,7 +62,7 @@ dependencies:
- recommonmark
- requests
- rmm==24.2.*
- scikit-build>=0.13.1
- scikit-build-core>=0.7.0
- scikit-learn>=0.23.1
- scipy
- setuptools>=61.0.0
2 changes: 1 addition & 1 deletion conda/recipes/cugraph-pyg/meta.yaml
@@ -24,7 +24,7 @@ requirements:
host:
- cython >=3.0.0
- python
- scikit-build >=0.13.1
- scikit-build-core >=0.7.0
run:
- rapids-dask-dependency ={{ minor_version }}
- numba >=0.57
2 changes: 1 addition & 1 deletion conda/recipes/cugraph/meta.yaml
@@ -61,7 +61,7 @@ requirements:
- python
- raft-dask ={{ minor_version }}
- rmm ={{ minor_version }}
- scikit-build >=0.13.1
- scikit-build-core >=0.7.0
- setuptools
run:
- aiohttp
2 changes: 1 addition & 1 deletion conda/recipes/pylibcugraph/meta.yaml
@@ -58,7 +58,7 @@ requirements:
- libcugraph ={{ version }}
- pylibraft ={{ minor_version }}
- python
- scikit-build >=0.13.1
- scikit-build-core >=0.7.0
- setuptools
run:
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
4 changes: 2 additions & 2 deletions cpp/CMakeLists.txt
@@ -41,7 +41,7 @@ endif()
# cuhornet currently doesn't support
#
# >= 90
set(supported_archs "60" "62" "70" "72" "75" "80" "86" "89" "90")
set(supported_archs "70" "72" "75" "80" "86" "89" "90")
foreach( arch IN LISTS CMAKE_CUDA_ARCHITECTURES)
string(REPLACE "-real" "" arch ${arch})
if( arch IN_LIST supported_archs )
@@ -423,7 +423,7 @@ add_library(cugraph_c
src/c_api/core_result.cpp
src/c_api/extract_ego.cpp
src/c_api/k_core.cpp
src/c_api/hierarchical_clustering_result.cpp
src/c_api/hierarchical_clustering_result.cpp
src/c_api/induced_subgraph.cpp
src/c_api/capi_helper.cu
src/c_api/legacy_spectral.cpp
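
The first hunk drops the Pascal-era architectures 60 and 62, making Volta (70) the floor; the second pair appears to be a whitespace-only touch-up of the hierarchical_clustering_result.cpp entry. A configure call against the new list could look like this, with paths and architecture choice purely illustrative (the loop above strips a -real suffix before checking):

    cmake -S cpp -B cpp/build -DCMAKE_CUDA_ARCHITECTURES="70-real;80"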
47 changes: 22 additions & 25 deletions cpp/include/cugraph/graph.hpp
@@ -90,24 +90,25 @@ class graph_t<vertex_t, edge_t, store_transposed, multi_gpu, std::enable_if_t<mu
graph_meta_t<vertex_t, edge_t, multi_gpu> meta,
bool do_expensive_check = false);

edge_t number_of_edges() const { return this->number_of_edges_; }

graph_view_t<vertex_t, edge_t, store_transposed, multi_gpu> view() const
{
std::vector<edge_t const*> offsets(edge_partition_offsets_.size(), nullptr);
std::vector<vertex_t const*> indices(edge_partition_indices_.size(), nullptr);
auto dcs_nzd_vertices = edge_partition_dcs_nzd_vertices_
? std::make_optional<std::vector<vertex_t const*>>(
(*edge_partition_dcs_nzd_vertices_).size(), nullptr)
: std::nullopt;
auto dcs_nzd_vertex_counts = edge_partition_dcs_nzd_vertex_counts_
? std::make_optional<std::vector<vertex_t>>(
(*edge_partition_dcs_nzd_vertex_counts_).size(), vertex_t{0})
: std::nullopt;
std::vector<raft::device_span<edge_t const>> offsets(edge_partition_offsets_.size());
std::vector<raft::device_span<vertex_t const>> indices(edge_partition_indices_.size());
auto dcs_nzd_vertices = edge_partition_dcs_nzd_vertices_
? std::make_optional<std::vector<raft::device_span<vertex_t const>>>(
(*edge_partition_dcs_nzd_vertices_).size())
: std::nullopt;
for (size_t i = 0; i < offsets.size(); ++i) {
offsets[i] = edge_partition_offsets_[i].data();
indices[i] = edge_partition_indices_[i].data();
offsets[i] = raft::device_span<edge_t const>(edge_partition_offsets_[i].data(),
edge_partition_offsets_[i].size());
indices[i] = raft::device_span<vertex_t const>(edge_partition_indices_[i].data(),
edge_partition_indices_[i].size());
if (dcs_nzd_vertices) {
(*dcs_nzd_vertices)[i] = (*edge_partition_dcs_nzd_vertices_)[i].data();
(*dcs_nzd_vertex_counts)[i] = (*edge_partition_dcs_nzd_vertex_counts_)[i];
(*dcs_nzd_vertices)[i] =
raft::device_span<vertex_t const>((*edge_partition_dcs_nzd_vertices_)[i].data(),
(*edge_partition_dcs_nzd_vertices_)[i].size());
}
}

@@ -196,15 +197,13 @@ class graph_t<vertex_t, edge_t, store_transposed, multi_gpu, std::enable_if_t<mu
}

return graph_view_t<vertex_t, edge_t, store_transposed, multi_gpu>(
*(this->handle_ptr()),
offsets,
indices,
dcs_nzd_vertices,
dcs_nzd_vertex_counts,
graph_view_meta_t<vertex_t, edge_t, store_transposed, multi_gpu>{
this->number_of_vertices(),
this->number_of_edges(),
this->graph_properties(),
this->properties_,
partition_,
edge_partition_segment_offsets_,
local_sorted_unique_edge_srcs,
Expand All @@ -224,7 +223,6 @@ class graph_t<vertex_t, edge_t, store_transposed, multi_gpu, std::enable_if_t<mu
// nzd: nonzero (local) degree
std::optional<std::vector<rmm::device_uvector<vertex_t>>> edge_partition_dcs_nzd_vertices_{
std::nullopt};
std::optional<std::vector<vertex_t>> edge_partition_dcs_nzd_vertex_counts_{std::nullopt};
partition_t<vertex_t> partition_{};

// segment offsets within the vertex partition based on vertex degree
@@ -283,16 +281,15 @@ class graph_t<vertex_t, edge_t, store_transposed, multi_gpu, std::enable_if_t<!m
graph_meta_t<vertex_t, edge_t, multi_gpu> meta,
bool do_expensive_check = false);

edge_t number_of_edges() const { return this->number_of_edges_; }

graph_view_t<vertex_t, edge_t, store_transposed, multi_gpu> view() const
{
return graph_view_t<vertex_t, edge_t, store_transposed, multi_gpu>(
*(this->handle_ptr()),
offsets_.data(),
indices_.data(),
graph_view_meta_t<vertex_t, edge_t, store_transposed, multi_gpu>{this->number_of_vertices(),
this->number_of_edges(),
this->graph_properties(),
segment_offsets_});
raft::device_span<edge_t const>(offsets_.data(), offsets_.size()),
raft::device_span<vertex_t const>(indices_.data(), indices_.size()),
graph_view_meta_t<vertex_t, edge_t, store_transposed, multi_gpu>{
this->number_of_vertices(), this->number_of_edges(), this->properties_, segment_offsets_});
}

private:
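
The view() refactor replaces parallel arrays of raw device pointers, plus the separate edge_partition_dcs_nzd_vertex_counts_ vector, with raft::device_span, which carries pointer and element count together; that is why the counts member is deleted from the member list above. A minimal stand-in sketch of the abstraction, not raft's actual definition:

    #include <cstddef>

    // Non-owning view over device memory: pointer and size travel together,
    // so callers no longer need a parallel vector of counts.
    template <typename T>
    class device_span {
     public:
      device_span() = default;
      device_span(T* data, std::size_t size) : data_(data), size_(size) {}
      T* data() const { return data_; }
      std::size_t size() const { return size_; }

     private:
      T* data_{nullptr};
      std::size_t size_{0};
    };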
(Diff truncated: the remaining 60 of 73 changed files are not shown.)
