
Commit

c10::optional -> std::optional
r-barnes committed Jan 9, 2025
1 parent 7cabb53 commit 82f34d0
Showing 16 changed files with 132 additions and 132 deletions.
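The change is mechanical: signatures and locals that previously used torch::optional and torch::nullopt (aliases of c10::optional and c10::nullopt) now spell out std::optional and std::nullopt. As a rough illustration of the calling pattern these signatures expose, here is a minimal standalone C++17 sketch; it does not depend on libtorch, and scatter_sum_sketch and its vector types are illustrative stand-ins rather than part of torch-scatter's API.

// Minimal sketch of the new calling convention, assuming only the C++17
// standard library. The function name and types are illustrative only.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

std::vector<float> scatter_sum_sketch(const std::vector<float> &src,
                                      const std::vector<int64_t> &index,
                                      std::optional<std::vector<float>> optional_out,
                                      std::optional<int64_t> dim_size) {
  // Output length: an explicit dim_size wins; otherwise use max(index) + 1.
  const int64_t size = dim_size.has_value()
                           ? *dim_size
                           : *std::max_element(index.begin(), index.end()) + 1;
  std::vector<float> out = optional_out.has_value()
                               ? *optional_out
                               : std::vector<float>(static_cast<size_t>(size), 0.0f);
  for (size_t i = 0; i < src.size(); ++i)
    out[static_cast<size_t>(index[i])] += src[i];
  return out;
}

int main() {
  const std::vector<float> src = {1.0f, 2.0f, 3.0f, 4.0f};
  const std::vector<int64_t> index = {0, 1, 1, 2};
  // Callers now write std::nullopt where torch::nullopt appeared before.
  const auto out = scatter_sum_sketch(src, index, std::nullopt, std::nullopt);
  for (float v : out)
    std::cout << v << ' ';  // prints: 1 5 4
  std::cout << '\n';
}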
8 changes: 4 additions & 4 deletions csrc/cpu/scatter_cpu.cpp
@@ -4,10 +4,10 @@
#include "reducer.h"
#include "utils.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
scatter_cpu(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce) {
CHECK_CPU(src);
CHECK_CPU(index);
if (optional_out.has_value())
@@ -36,7 +36,7 @@ scatter_cpu(torch::Tensor src, torch::Tensor index, int64_t dim,
out = torch::empty(sizes, src.options());
}

torch::optional<torch::Tensor> arg_out = torch::nullopt;
std::optional<torch::Tensor> arg_out = std::nullopt;
int64_t *arg_out_data = nullptr;
if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) {
arg_out = torch::full_like(out, src.size(dim), index.options());
6 changes: 3 additions & 3 deletions csrc/cpu/scatter_cpu.h
@@ -2,7 +2,7 @@

#include "../extensions.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
scatter_cpu(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce);
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce);
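Note that scatter_cpu returns a std::tuple whose second element is an optional tensor (the arg_out of a min/max reduction), and that optional is now a std::optional as well. A small self-contained sketch of that return pattern, using plain standard-library containers and illustrative names rather than torch::Tensor:

// Sketch of the (out, arg_out) pattern: arg_out starts as std::nullopt and is
// only populated for an arg-producing reduction ("max" here). Illustrative only.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <tuple>
#include <vector>

std::tuple<std::vector<float>, std::optional<std::vector<int64_t>>>
segment_reduce_sketch(const std::vector<float> &src,
                      const std::vector<int64_t> &index, int64_t dim_size,
                      const std::string &reduce) {
  std::vector<float> out(static_cast<size_t>(dim_size),
                         reduce == "max" ? -1e30f : 0.0f);
  std::optional<std::vector<int64_t>> arg_out = std::nullopt;
  if (reduce == "max")
    arg_out = std::vector<int64_t>(static_cast<size_t>(dim_size), -1);

  for (size_t i = 0; i < src.size(); ++i) {
    const auto seg = static_cast<size_t>(index[i]);
    if (reduce == "max") {
      if (src[i] > out[seg]) {
        out[seg] = src[i];
        (*arg_out)[seg] = static_cast<int64_t>(i);  // record the argmax position
      }
    } else {  // treat everything else as "sum" for brevity
      out[seg] += src[i];
    }
  }
  return {out, arg_out};
}

int main() {
  auto [out, arg_out] =
      segment_reduce_sketch({3.0f, 7.0f, 2.0f}, {0, 0, 1}, 2, "max");
  std::cout << out[0] << ' ' << out[1] << '\n';  // 7 2
  if (arg_out.has_value())
    std::cout << (*arg_out)[0] << ' ' << (*arg_out)[1] << '\n';  // 1 2
}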
10 changes: 5 additions & 5 deletions csrc/cpu/segment_coo_cpu.cpp
@@ -5,10 +5,10 @@
#include "utils.h"
#include <ATen/OpMathType.h>

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_coo_cpu(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce) {
CHECK_CPU(src);
CHECK_CPU(index);
if (optional_out.has_value())
@@ -45,7 +45,7 @@ segment_coo_cpu(torch::Tensor src, torch::Tensor index,
out = torch::empty(sizes, src.options());
}

torch::optional<torch::Tensor> arg_out = torch::nullopt;
std::optional<torch::Tensor> arg_out = std::nullopt;
int64_t *arg_out_data = nullptr;
if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) {
arg_out = torch::full_like(out, src.size(dim), index.options());
@@ -141,7 +141,7 @@ segment_coo_cpu(torch::Tensor src, torch::Tensor index,
}

torch::Tensor gather_coo_cpu(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out) {
std::optional<torch::Tensor> optional_out) {
CHECK_CPU(src);
CHECK_CPU(index);
if (optional_out.has_value())
8 changes: 4 additions & 4 deletions csrc/cpu/segment_coo_cpu.h
@@ -2,10 +2,10 @@

#include "../extensions.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_coo_cpu(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce);
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce);

torch::Tensor gather_coo_cpu(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out);
std::optional<torch::Tensor> optional_out);
8 changes: 4 additions & 4 deletions csrc/cpu/segment_csr_cpu.cpp
@@ -5,9 +5,9 @@
#include "utils.h"
#include <ATen/OpMathType.h>

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_csr_cpu(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out,
std::optional<torch::Tensor> optional_out,
std::string reduce) {
CHECK_CPU(src);
CHECK_CPU(indptr);
@@ -38,7 +38,7 @@ segment_csr_cpu(torch::Tensor src, torch::Tensor indptr,
out = torch::empty(sizes, src.options());
}

torch::optional<torch::Tensor> arg_out = torch::nullopt;
std::optional<torch::Tensor> arg_out = std::nullopt;
int64_t *arg_out_data = nullptr;
if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) {
arg_out = torch::full(out.sizes(), src.size(dim), indptr.options());
@@ -92,7 +92,7 @@ segment_csr_cpu(torch::Tensor src, torch::Tensor indptr,
}

torch::Tensor gather_csr_cpu(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out) {
std::optional<torch::Tensor> optional_out) {
CHECK_CPU(src);
CHECK_CPU(indptr);
if (optional_out.has_value())
6 changes: 3 additions & 3 deletions csrc/cpu/segment_csr_cpu.h
@@ -2,10 +2,10 @@

#include "../extensions.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_csr_cpu(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out,
std::optional<torch::Tensor> optional_out,
std::string reduce);

torch::Tensor gather_csr_cpu(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out);
std::optional<torch::Tensor> optional_out);
8 changes: 4 additions & 4 deletions csrc/cuda/scatter_cuda.cu
@@ -55,10 +55,10 @@ scatter_arg_kernel(const scalar_t *src_data,
}
}

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
scatter_cuda(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce) {
CHECK_CUDA(src);
CHECK_CUDA(index);
if (optional_out.has_value())
@@ -89,7 +89,7 @@ scatter_cuda(torch::Tensor src, torch::Tensor index, int64_t dim,
out = torch::empty(sizes, src.options());
}

torch::optional<torch::Tensor> arg_out = torch::nullopt;
std::optional<torch::Tensor> arg_out = std::nullopt;
int64_t *arg_out_data = nullptr;
if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) {
arg_out = torch::full_like(out, src.size(dim), index.options());
6 changes: 3 additions & 3 deletions csrc/cuda/scatter_cuda.h
@@ -2,7 +2,7 @@

#include "../extensions.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
scatter_cuda(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce);
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce);
10 changes: 5 additions & 5 deletions csrc/cuda/segment_coo_cuda.cu
@@ -149,10 +149,10 @@ __global__ void segment_coo_arg_broadcast_kernel(
}
}

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_coo_cuda(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce) {
CHECK_CUDA(src);
CHECK_CUDA(index);
if (optional_out.has_value())
@@ -191,7 +191,7 @@ segment_coo_cuda(torch::Tensor src, torch::Tensor index,
out = torch::zeros(sizes, src.options());
}

torch::optional<torch::Tensor> arg_out = torch::nullopt;
std::optional<torch::Tensor> arg_out = std::nullopt;
int64_t *arg_out_data = nullptr;
if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) {
arg_out = torch::full_like(out, src.size(dim), index.options());
@@ -325,7 +325,7 @@ __global__ void gather_coo_broadcast_kernel(
}
torch::Tensor gather_coo_cuda(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out) {
std::optional<torch::Tensor> optional_out) {
CHECK_CUDA(src);
CHECK_CUDA(index);
if (optional_out.has_value())
8 changes: 4 additions & 4 deletions csrc/cuda/segment_coo_cuda.h
@@ -2,10 +2,10 @@

#include "../extensions.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_coo_cuda(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce);
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce);

torch::Tensor gather_coo_cuda(torch::Tensor src, torch::Tensor index,
torch::optional<torch::Tensor> optional_out);
std::optional<torch::Tensor> optional_out);
8 changes: 4 additions & 4 deletions csrc/cuda/segment_csr_cuda.cu
@@ -94,9 +94,9 @@ __global__ void segment_csr_broadcast_kernel(
}
}

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_csr_cuda(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out,
std::optional<torch::Tensor> optional_out,
std::string reduce) {
CHECK_CUDA(src);
CHECK_CUDA(indptr);
@@ -128,7 +128,7 @@ segment_csr_cuda(torch::Tensor src, torch::Tensor indptr,
out = torch::empty(sizes, src.options());
}

torch::optional<torch::Tensor> arg_out = torch::nullopt;
std::optional<torch::Tensor> arg_out = std::nullopt;
int64_t *arg_out_data = nullptr;
if (reduce2REDUCE.at(reduce) == MIN || reduce2REDUCE.at(reduce) == MAX) {
arg_out = torch::full(out.sizes(), src.size(dim), indptr.options());
@@ -217,7 +217,7 @@ __global__ void gather_csr_broadcast_kernel(
}

torch::Tensor gather_csr_cuda(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out) {
std::optional<torch::Tensor> optional_out) {
CHECK_CUDA(src);
CHECK_CUDA(indptr);
if (optional_out.has_value())
6 changes: 3 additions & 3 deletions csrc/cuda/segment_csr_cuda.h
@@ -2,10 +2,10 @@

#include "../extensions.h"

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
segment_csr_cuda(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out,
std::optional<torch::Tensor> optional_out,
std::string reduce);

torch::Tensor gather_csr_cuda(torch::Tensor src, torch::Tensor indptr,
torch::optional<torch::Tensor> optional_out);
std::optional<torch::Tensor> optional_out);
48 changes: 24 additions & 24 deletions csrc/scatter.cpp
@@ -32,10 +32,10 @@ torch::Tensor broadcast(torch::Tensor src, torch::Tensor other, int64_t dim) {
return src;
}

std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
std::tuple<torch::Tensor, std::optional<torch::Tensor>>
scatter_fw(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size, std::string reduce) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size, std::string reduce) {
if (src.device().is_cuda()) {
#ifdef WITH_CUDA
return scatter_cuda(src, index, dim, optional_out, dim_size, reduce);
@@ -55,8 +55,8 @@ class ScatterSum : public torch::autograd::Function<ScatterSum> {
public:
static variable_list forward(AutogradContext *ctx, Variable src,
Variable index, int64_t dim,
torch::optional<Variable> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<Variable> optional_out,
std::optional<int64_t> dim_size) {
dim = dim < 0 ? src.dim() + dim : dim;
ctx->saved_data["dim"] = dim;
ctx->saved_data["src_shape"] = src.sizes();
@@ -84,8 +84,8 @@ class ScatterMul : public torch::autograd::Function<ScatterMul> {
public:
static variable_list forward(AutogradContext *ctx, Variable src,
Variable index, int64_t dim,
torch::optional<Variable> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<Variable> optional_out,
std::optional<int64_t> dim_size) {
dim = dim < 0 ? src.dim() + dim : dim;
ctx->saved_data["dim"] = dim;
ctx->saved_data["src_shape"] = src.sizes();
@@ -116,8 +116,8 @@ class ScatterMean : public torch::autograd::Function<ScatterMean> {
public:
static variable_list forward(AutogradContext *ctx, Variable src,
Variable index, int64_t dim,
torch::optional<Variable> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<Variable> optional_out,
std::optional<int64_t> dim_size) {
dim = dim < 0 ? src.dim() + dim : dim;
ctx->saved_data["dim"] = dim;
ctx->saved_data["src_shape"] = src.sizes();
@@ -131,7 +131,7 @@ class ScatterMean : public torch::autograd::Function<ScatterMean> {
auto ones = torch::ones(old_index.sizes(), src.options());
result = scatter_fw(ones, old_index,
old_index.dim() <= dim ? old_index.dim() - 1 : dim,
torch::nullopt, out.size(dim), "sum");
std::nullopt, out.size(dim), "sum");
auto count = std::get<0>(result);
count.masked_fill_(count < 1, 1);
count = broadcast(count, out, dim);
@@ -164,8 +164,8 @@ class ScatterMin : public torch::autograd::Function<ScatterMin> {
public:
static variable_list forward(AutogradContext *ctx, Variable src,
Variable index, int64_t dim,
torch::optional<Variable> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<Variable> optional_out,
std::optional<int64_t> dim_size) {
dim = dim < 0 ? src.dim() + dim : dim;
ctx->saved_data["dim"] = dim;
ctx->saved_data["src_shape"] = src.sizes();
@@ -200,8 +200,8 @@ class ScatterMax : public torch::autograd::Function<ScatterMax> {
public:
static variable_list forward(AutogradContext *ctx, Variable src,
Variable index, int64_t dim,
torch::optional<Variable> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<Variable> optional_out,
std::optional<int64_t> dim_size) {
dim = dim < 0 ? src.dim() + dim : dim;
ctx->saved_data["dim"] = dim;
ctx->saved_data["src_shape"] = src.sizes();
@@ -234,37 +234,37 @@

SCATTER_API torch::Tensor
scatter_sum(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size) {
return ScatterSum::apply(src, index, dim, optional_out, dim_size)[0];
}

SCATTER_API torch::Tensor
scatter_mul(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size) {
return ScatterMul::apply(src, index, dim, optional_out, dim_size)[0];
}

SCATTER_API torch::Tensor
scatter_mean(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size) {
return ScatterMean::apply(src, index, dim, optional_out, dim_size)[0];
}

SCATTER_API std::tuple<torch::Tensor, torch::Tensor>
scatter_min(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size) {
auto result = ScatterMin::apply(src, index, dim, optional_out, dim_size);
return std::make_tuple(result[0], result[1]);
}

SCATTER_API std::tuple<torch::Tensor, torch::Tensor>
scatter_max(torch::Tensor src, torch::Tensor index, int64_t dim,
torch::optional<torch::Tensor> optional_out,
torch::optional<int64_t> dim_size) {
std::optional<torch::Tensor> optional_out,
std::optional<int64_t> dim_size) {
auto result = ScatterMax::apply(src, index, dim, optional_out, dim_size);
return std::make_tuple(result[0], result[1]);
}
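The ScatterMean hunk above only swaps the nullopt spelling; the mean itself is still computed as a scattered sum divided by a per-index count obtained by scattering ones (with counts clamped to at least 1 before the division). A rough standalone sketch of that computation, using plain vectors and illustrative names in place of tensors:

// Sketch of the mean-by-count approach seen in ScatterMean: scatter-sum the
// values, scatter-sum a vector of ones to get counts, clamp counts to >= 1,
// then divide. Names and types are illustrative, not the library's.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<float> scatter_mean_sketch(const std::vector<float> &src,
                                       const std::vector<int64_t> &index,
                                       int64_t dim_size) {
  std::vector<float> sum(static_cast<size_t>(dim_size), 0.0f);
  std::vector<float> count(static_cast<size_t>(dim_size), 0.0f);
  for (size_t i = 0; i < src.size(); ++i) {
    const auto seg = static_cast<size_t>(index[i]);
    sum[seg] += src[i];   // scatter_sum(src, index)
    count[seg] += 1.0f;   // scatter_sum(ones, index)
  }
  for (size_t j = 0; j < sum.size(); ++j) {
    const float c = count[j] < 1.0f ? 1.0f : count[j];  // count.masked_fill_(count < 1, 1)
    sum[j] /= c;
  }
  return sum;
}

int main() {
  const auto mean = scatter_mean_sketch({2.0f, 4.0f, 10.0f}, {0, 0, 2}, 3);
  for (float v : mean)
    std::cout << v << ' ';  // prints: 3 0 10
  std::cout << '\n';
}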
(The remaining changed files are not shown here.)
