Skip to content

Commit

Permalink
Add clang-format config to set ColumnLimit for Cpp to 88.
Browse files Browse the repository at this point in the history
  • Loading branch information
jatkinson1000 authored and TomMelt committed Dec 9, 2024
1 parent 9f3323b commit fad331e
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 48 deletions.
4 changes: 4 additions & 0 deletions .clang-format
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
---
Language: Cpp
ColumnLimit: 88
---
39 changes: 15 additions & 24 deletions src/ctorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,7 @@ constexpr auto get_dtype(torch_data_t dtype) {
case torch_kFloat64:
return torch::kFloat64;
default:
std::cerr << "[WARNING]: unknown data type, setting to torch_kFloat32"
<< std::endl;
std::cerr << "[WARNING]: unknown data type, setting to torch_kFloat32" << std::endl;
return torch::kFloat32;
}
}
Expand All @@ -36,33 +35,28 @@ const auto get_device(torch_device_t device_type, int device_index) {
switch (device_type) {
case torch_kCPU:
if (device_index != -1) {
std::cerr << "[WARNING]: device index unused for CPU-only runs"
<< std::endl;
std::cerr << "[WARNING]: device index unused for CPU-only runs" << std::endl;
}
return torch::Device(torch::kCPU);
case torch_kCUDA:
if (device_index == -1) {
std::cerr << "[WARNING]: device index unset, defaulting to 0"
<< std::endl;
std::cerr << "[WARNING]: device index unset, defaulting to 0" << std::endl;
device_index = 0;
}
if (device_index >= 0 && device_index < torch::cuda::device_count()) {
return torch::Device(torch::kCUDA, device_index);
} else {
std::cerr << "[ERROR]: invalid device index " << device_index
<< " for device count " << torch::cuda::device_count()
<< std::endl;
<< " for device count " << torch::cuda::device_count() << std::endl;
exit(EXIT_FAILURE);
}
default:
std::cerr << "[WARNING]: unknown device type, setting to torch_kCPU"
<< std::endl;
std::cerr << "[WARNING]: unknown device type, setting to torch_kCPU" << std::endl;
return torch::Device(torch::kCPU);
}
}

void set_is_training(torch_jit_script_module_t module,
const bool is_training = false) {
void set_is_training(torch_jit_script_module_t module, const bool is_training = false) {
auto model = static_cast<torch::jit::script::Module *>(module);
if (is_training) {
model->train();
Expand Down Expand Up @@ -144,8 +138,7 @@ torch_tensor_t torch_empty(int ndim, const int64_t *shape, torch_data_t dtype,
// data
torch_tensor_t torch_from_blob(void *data, int ndim, const int64_t *shape,
const int64_t *strides, torch_data_t dtype,
torch_device_t device_type,
int device_index = -1,
torch_device_t device_type, int device_index = -1,
const bool requires_grad = false) {
torch::AutoGradMode enable_grad(requires_grad);
torch::Tensor *tensor = nullptr;
Expand All @@ -155,9 +148,8 @@ torch_tensor_t torch_from_blob(void *data, int ndim, const int64_t *shape,
c10::IntArrayRef vshape(shape, ndim);
c10::IntArrayRef vstrides(strides, ndim);
tensor = new torch::Tensor;
*tensor =
torch::from_blob(data, vshape, vstrides, torch::dtype(get_dtype(dtype)))
.to(get_device(device_type, device_index));
*tensor = torch::from_blob(data, vshape, vstrides, torch::dtype(get_dtype(dtype)))
.to(get_device(device_type, device_index));

} catch (const torch::Error &e) {
std::cerr << "[ERROR]: " << e.msg() << std::endl;
Expand Down Expand Up @@ -241,11 +233,11 @@ void torch_tensor_delete(torch_tensor_t tensor) {
delete t;
}

torch_jit_script_module_t
torch_jit_load(const char *filename,
const torch_device_t device_type = torch_kCPU,
const int device_index = -1, const bool requires_grad = false,
const bool is_training = false) {
torch_jit_script_module_t torch_jit_load(const char *filename,
const torch_device_t device_type = torch_kCPU,
const int device_index = -1,
const bool requires_grad = false,
const bool is_training = false) {
torch::AutoGradMode enable_grad(requires_grad);
torch::jit::script::Module *module = nullptr;
try {
Expand Down Expand Up @@ -304,8 +296,7 @@ void torch_jit_module_forward(const torch_jit_script_module_t module,
} else {
// If for some reason the forward method does not return a Tensor it
// should raise an error when trying to cast to a Tensor type
std::cerr << "[ERROR]: Model Output is neither Tensor nor Tuple."
<< std::endl;
std::cerr << "[ERROR]: Model Output is neither Tensor nor Tuple." << std::endl;
}
} catch (const torch::Error &e) {
std::cerr << "[ERROR]: " << e.msg() << std::endl;
Expand Down
44 changes: 20 additions & 24 deletions src/ctorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,9 @@ typedef enum { torch_kCPU, torch_kCUDA } torch_device_t;
* @param device index for the CUDA case
* @param whether gradient is required
*/
EXPORT_C torch_tensor_t torch_zeros(int ndim, const int64_t *shape,
torch_data_t dtype,
torch_device_t device_type,
int device_index, const bool requires_grad);
EXPORT_C torch_tensor_t torch_zeros(int ndim, const int64_t *shape, torch_data_t dtype,
torch_device_t device_type, int device_index,
const bool requires_grad);

/**
* Function to generate a Torch Tensor of ones
Expand All @@ -56,8 +55,7 @@ EXPORT_C torch_tensor_t torch_zeros(int ndim, const int64_t *shape,
* @param device index for the CUDA case
* @param whether gradient is required
*/
EXPORT_C torch_tensor_t torch_ones(int ndim, const int64_t *shape,
torch_data_t dtype,
EXPORT_C torch_tensor_t torch_ones(int ndim, const int64_t *shape, torch_data_t dtype,
torch_device_t device_type, int device_index,
const bool requires_grad);

Expand All @@ -70,10 +68,9 @@ EXPORT_C torch_tensor_t torch_ones(int ndim, const int64_t *shape,
* @param device index for the CUDA case
* @param whether gradient is required
*/
EXPORT_C torch_tensor_t torch_empty(int ndim, const int64_t *shape,
torch_data_t dtype,
torch_device_t device_type,
int device_index, const bool requires_grad);
EXPORT_C torch_tensor_t torch_empty(int ndim, const int64_t *shape, torch_data_t dtype,
torch_device_t device_type, int device_index,
const bool requires_grad);

/**
* Function to create a Torch Tensor from memory location given extra
Expand All @@ -88,10 +85,10 @@ EXPORT_C torch_tensor_t torch_empty(int ndim, const int64_t *shape,
* @param whether gradient is required
* @return Torch Tensor interpretation of the data pointed at
*/
EXPORT_C torch_tensor_t torch_from_blob(
void *data, int ndim, const int64_t *shape, const int64_t *strides,
torch_data_t dtype, torch_device_t device_type, int device_index,
const bool requires_grad);
EXPORT_C torch_tensor_t torch_from_blob(void *data, int ndim, const int64_t *shape,
const int64_t *strides, torch_data_t dtype,
torch_device_t device_type, int device_index,
const bool requires_grad);

/**
* Function to extract a C-array from a Torch Tensor's data.
Expand All @@ -100,8 +97,7 @@ EXPORT_C torch_tensor_t torch_from_blob(
* @param data type of the elements of the Tensor
* @return pointer to the Tensor in memory
*/
EXPORT_C void *torch_to_blob(const torch_tensor_t tensor,
const torch_data_t dtype);
EXPORT_C void *torch_to_blob(const torch_tensor_t tensor, const torch_data_t dtype);

/**
* Function to print out a Torch Tensor
Expand Down Expand Up @@ -131,8 +127,7 @@ EXPORT_C int torch_tensor_get_rank(const torch_tensor_t tensor);
#ifdef UNIX
EXPORT_C const long int *torch_tensor_get_sizes(const torch_tensor_t tensor);
#else
EXPORT_C const long long int *
torch_tensor_get_sizes(const torch_tensor_t tensor);
EXPORT_C const long long int *torch_tensor_get_sizes(const torch_tensor_t tensor);
#endif

/**
Expand All @@ -154,9 +149,11 @@ EXPORT_C void torch_tensor_delete(torch_tensor_t tensor);
* @param whether model is being trained
* @return Torch Module loaded in from file
*/
EXPORT_C torch_jit_script_module_t torch_jit_load(
const char *filename, const torch_device_t device_type,
const int device_index, const bool requires_grad, const bool is_training);
EXPORT_C torch_jit_script_module_t torch_jit_load(const char *filename,
const torch_device_t device_type,
const int device_index,
const bool requires_grad,
const bool is_training);

/**
* Function to run the `forward` method of a Torch Module
Expand All @@ -168,9 +165,8 @@ EXPORT_C torch_jit_script_module_t torch_jit_load(
* @param whether gradient is required
*/
EXPORT_C void torch_jit_module_forward(const torch_jit_script_module_t module,
const torch_tensor_t *inputs,
const int nin, torch_tensor_t *outputs,
const int nout,
const torch_tensor_t *inputs, const int nin,
torch_tensor_t *outputs, const int nout,
const bool requires_grad);

/**
Expand Down

0 comments on commit fad331e

Please sign in to comment.