
Commit

Formatting
sbaldu committed Jan 6, 2024
1 parent 85e5f4a commit 9fb07d5
Showing 6 changed files with 63 additions and 29 deletions.
12 changes: 9 additions & 3 deletions src/nnhep/headers/Activators.hpp
@@ -76,7 +76,9 @@ namespace nnhep {
/// @return The derivative of the identity function.
///
/// @details The derivative of the identity function is always 0.
- std::vector<double> grad(shared<Layer<T>> layer) { return std::vector<double>(layer->size(), 0.); }
+ std::vector<double> grad(shared<Layer<T>> layer) {
+ return std::vector<double>(layer->size(), 0.);
+ }
/// @brief The derivative of the identity function.
/// @param node_values The values of the nodes.
/// @return The derivative of the identity function.
@@ -131,7 +133,9 @@ namespace nnhep {
/// @brief The derivative of the linear activation function.
/// @param layer The layer of nodes.
/// @return The derivative of the linear activation function.
- std::vector<double> grad(shared<Layer<T>> layer) { return std::vector<double>(layer->size(), 1); }
+ std::vector<double> grad(shared<Layer<T>> layer) {
+ return std::vector<double>(layer->size(), 1);
+ }
/// @brief The derivative of the linear activation function.
/// @param node_values The values of the nodes.
/// @return The derivative of the linear activation function.
@@ -188,7 +192,9 @@ namespace nnhep {
/// @brief The derivative of the sigmoid activation function.
/// @param activated_value The value of the activated node.
/// @return The derivative of the sigmoid activation function.
- double grad(double activated_value) { return activated_value * (1 - activated_value); }
+ double grad(double activated_value) {
+ return activated_value * (1 - activated_value);
+ }
/// @brief The derivative of the sigmoid activation function.
/// @param layer The layer of nodes.
/// @return The derivative of the sigmoid activation function.
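Note on the sigmoid hunk above: because sigmoid(x) = 1 / (1 + exp(-x)), its derivative can be written entirely in terms of the already-activated value, sigmoid'(x) = s * (1 - s) with s = sigmoid(x), which is why grad receives activated_value rather than the raw input. A minimal standalone sketch of that identity (illustrative only, not part of this commit; the free-function names are invented):

#include <cassert>
#include <cmath>

// Sigmoid and its derivative expressed through the activated value s,
// mirroring the relation used by grad(double activated_value) above.
double sigmoid(double x) { return 1.0 / (1.0 + std::exp(-x)); }
double sigmoid_grad(double s) { return s * (1.0 - s); }

int main() {
  const double s = sigmoid(0.0);  // 0.5
  assert(std::abs(sigmoid_grad(s) - 0.25) < 1e-12);
  return 0;
}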
14 changes: 9 additions & 5 deletions src/nnhep/headers/ErrorFunction.hpp
@@ -37,7 +37,8 @@ namespace nnhep {
/// @details The error is calculated by taking the mean of the squared
/// difference between the node values and the expected values.
template <typename U>
- double operator()(const std::vector<T>& node_values, const std::vector<U>& expected_values) {
+ double operator()(const std::vector<T>& node_values,
+ const std::vector<U>& expected_values) {
double error{};
const size_t N{node_values.size()};
for (size_t node_index{}; node_index < N; ++node_index) {
@@ -66,7 +67,8 @@ namespace nnhep {
int N{layers[layer_id]->size()};
std::vector<double> delta(N);
for (int node_index{}; node_index < N; ++node_index) {
- delta[node_index] = (*layers[layer_id])[node_index] - static_cast<T>(expected_values[node_index]);
+ delta[node_index] = (*layers[layer_id])[node_index] -
+ static_cast<T>(expected_values[node_index]);
}

return delta;
@@ -76,9 +78,11 @@
std::vector<double> delta(N);

for (int node_index{}; node_index < N; ++node_index) {
- std::vector<double> previous_delta{grad(expected_values, layer_id + 1, layers, weights)};
- delta[node_index] = act.grad((*layers[layer_id])[node_index]) *
- (weights[layer_id]->transpose() * previous_delta)[node_index];
+ std::vector<double> previous_delta{
+ grad(expected_values, layer_id + 1, layers, weights)};
+ delta[node_index] =
+ act.grad((*layers[layer_id])[node_index]) *
+ (weights[layer_id]->transpose() * previous_delta)[node_index];
}

return delta;
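For reference, the reflowed code above computes the two standard mean-squared-error quantities: the loss accumulates the squared difference between node values and expected values, and the output-layer delta is simply (output - target); hidden-layer deltas are then obtained by multiplying the transposed weight matrix into the next layer's delta and scaling by the activation gradient. A plain-STL sketch of the output-layer pieces (an illustration under those assumptions, not the library API):

#include <cstddef>
#include <vector>

// Sum of squared differences between outputs y and targets t,
// the quantity accumulated by operator() in the hunk above.
double squared_error(const std::vector<double>& y, const std::vector<double>& t) {
  double error{};
  for (std::size_t i{}; i < y.size(); ++i) {
    const double diff = y[i] - t[i];
    error += diff * diff;
  }
  return error;
}

// Output-layer delta: element-wise (y - t), as in the first grad branch above.
std::vector<double> output_delta(const std::vector<double>& y,
                                 const std::vector<double>& t) {
  std::vector<double> delta(y.size());
  for (std::size_t i{}; i < y.size(); ++i) {
    delta[i] = y[i] - t[i];
  }
  return delta;
}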
9 changes: 6 additions & 3 deletions src/nnhep/headers/Layer.hpp
@@ -80,7 +80,8 @@ namespace nnhep {
Layer<T>::Layer(int n_nodes) : m_nodes(n_nodes), n_nodes{n_nodes} {}

template <typename T>
- Layer<T>::Layer(std::vector<T> nodes) : m_nodes{std::move(nodes)}, n_nodes{m_nodes.size()} {}
+ Layer<T>::Layer(std::vector<T> nodes)
+ : m_nodes{std::move(nodes)}, n_nodes{m_nodes.size()} {}

template <typename T>
Layer<T>::Layer(std::stringstream& stream) {
@@ -110,7 +111,8 @@ namespace nnhep {
}
++node_index;
} catch (int num) {
std::cout << "The data provided exceedes the number of nodes expected for the layer\n";
std::cout
<< "The data provided exceedes the number of nodes expected for the layer\n";
}
}
}
@@ -123,7 +125,8 @@
}
m_nodes[i] = value;
} catch (...) {
std::cout << "The index " << i << " is larger than the number of nodes in the layer\n";
std::cout << "The index " << i
<< " is larger than the number of nodes in the layer\n";
}
}

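The Layer.hpp changes only rewrap two diagnostics, but the surrounding pattern is worth spelling out: an index or size check that throws, and a catch block that reports the problem instead of terminating. A standalone illustration of that report-and-continue pattern (not the nnhep API; the names here are invented):

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Bounds-checked assignment that reports an out-of-range index and carries on,
// similar in spirit to the catch blocks shown in the hunks above.
void set_node(std::vector<double>& nodes, std::size_t i, double value) {
  try {
    if (i >= nodes.size()) {
      throw std::out_of_range("index exceeds layer size");
    }
    nodes[i] = value;
  } catch (const std::out_of_range&) {
    std::cout << "The index " << i << " is larger than the number of nodes in the layer\n";
  }
}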
38 changes: 25 additions & 13 deletions src/nnhep/headers/Network.hpp
@@ -102,7 +102,9 @@ namespace nnhep {
/// @param weight_matrix The weight matrix to use
/// @param bias_vector The bias vector to use
/// @return The values of the next layer
- std::vector<T> forward_propatation(shared<Layer<T>>, shared<Matrix<W>>, shared<std::vector<W>>);
+ std::vector<T> forward_propatation(shared<Layer<T>>,
+ shared<Matrix<W>>,
+ shared<std::vector<W>>);
/// @brief Forward propagate the values of the network
///
/// @details This function is used to forward propagate the values of the
@@ -185,7 +187,8 @@ namespace nnhep {
m_bias(n_layers - 1) {
for (int i{}; i < n_layers - 1; ++i) {
m_layers[i] = std::make_shared<Layer<T>>(nodes_per_layer[i]);
- m_weights[i] = std::make_shared<Matrix<W>>(nodes_per_layer[i + 1], nodes_per_layer[i]);
+ m_weights[i] =
+ std::make_shared<Matrix<W>>(nodes_per_layer[i + 1], nodes_per_layer[i]);
m_bias[i] = std::make_shared<std::vector<W>>(nodes_per_layer[i + 1]);

// Generate random weight matrices
@@ -231,7 +234,8 @@ namespace nnhep {
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
- const shared<Matrix<W>> Network<T, W, Activator, Loss>::weight_matrix(int layer_id) const {
+ const shared<Matrix<W>> Network<T, W, Activator, Loss>::weight_matrix(
+ int layer_id) const {
return m_weights[layer_id];
}

@@ -251,7 +255,8 @@
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
- void Network<T, W, Activator, Loss>::set_matrix_data(int layer_id, Matrix<W> weight_matrix) {
+ void Network<T, W, Activator, Loss>::set_matrix_data(int layer_id,
+ Matrix<W> weight_matrix) {
m_weights[layer_id] = std::make_shared<Matrix<W>>(weight_matrix);
}

@@ -261,7 +266,8 @@
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
- void Network<T, W, Activator, Loss>::set_matrix_data(int layer_id, shared<Matrix<W>> weight_matrix_ptr) {
+ void Network<T, W, Activator, Loss>::set_matrix_data(
+ int layer_id, shared<Matrix<W>> weight_matrix_ptr) {
m_weights[layer_id] = weight_matrix_ptr;
}

@@ -271,7 +277,8 @@
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
- void Network<T, W, Activator, Loss>::set_bias_data(int layer_id, std::vector<W> bias_vector) {
+ void Network<T, W, Activator, Loss>::set_bias_data(int layer_id,
+ std::vector<W> bias_vector) {
m_bias[layer_id] = std::make_shared<std::vector<W>>(bias_vector);
}

@@ -281,7 +288,8 @@
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
- void Network<T, W, Activator, Loss>::set_bias_data(int layer_id, shared<std::vector<W>> bias_vector_ptr) {
+ void Network<T, W, Activator, Loss>::set_bias_data(
+ int layer_id, shared<std::vector<W>> bias_vector_ptr) {
m_bias[layer_id] = bias_vector_ptr;
}

@@ -291,9 +299,10 @@
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
- std::vector<T> Network<T, W, Activator, Loss>::forward_propatation(shared<Layer<T>> layer,
- shared<Matrix<W>> weight_matrix,
- shared<std::vector<W>> bias_vector) {
+ std::vector<T> Network<T, W, Activator, Loss>::forward_propatation(
+ shared<Layer<T>> layer,
+ shared<Matrix<W>> weight_matrix,
+ shared<std::vector<W>> bias_vector) {
std::vector<W> next_layer_nodes{*weight_matrix * layer->nodes() + *bias_vector};

return Activator<T>()(next_layer_nodes);
@@ -307,7 +316,8 @@
typename Loss>
void Network<T, W, Activator, Loss>::forward_propatation() {
for (int i{}; i < n_layers - 1; ++i) {
- std::vector<T> new_layer_data{forward_propatation(m_layers[i], m_weights[i], m_bias[i])};
+ std::vector<T> new_layer_data{
+ forward_propatation(m_layers[i], m_weights[i], m_bias[i])};
m_layers[i + 1]->set_node_data(new_layer_data);
}
}
@@ -336,7 +346,8 @@
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
template <typename U>
- void Network<T, W, Activator, Loss>::back_propagation(double eta, const std::vector<U>& target) {
+ void Network<T, W, Activator, Loss>::back_propagation(double eta,
+ const std::vector<U>& target) {
for (int layer_id{n_layers - 2}; layer_id >= 0; --layer_id) {
back_propagation(target, layer_id, eta);
}
@@ -395,7 +406,8 @@
bias.push_back(std::stod(value));
}

- m_weights[i] = std::make_shared<Matrix<W>>(m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
+ m_weights[i] = std::make_shared<Matrix<W>>(
+ m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
m_bias[i] = std::make_shared<std::vector<W>>(bias);
}
}
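Most of the Network.hpp rewraps touch forward_propatation, whose per-layer computation is next = Activator(W * x + b): multiply the weight matrix into the current layer's nodes, add the bias vector, and apply the activation element-wise. A plain-STL sketch of one such dense step (illustrative only; the real code goes through the Matrix, Layer, and Activator types):

#include <cstddef>
#include <vector>

// One dense forward step: next[row] = act(sum over col of W[row][col] * x[col] + b[row]).
std::vector<double> forward_step(const std::vector<std::vector<double>>& W,
                                 const std::vector<double>& x,
                                 const std::vector<double>& b,
                                 double (*act)(double)) {
  std::vector<double> next(W.size());
  for (std::size_t row{}; row < W.size(); ++row) {
    double sum = b[row];
    for (std::size_t col{}; col < x.size(); ++col) {
      sum += W[row][col] * x[col];
    }
    next[row] = act(sum);
  }
  return next;
}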
13 changes: 10 additions & 3 deletions test/cuda/MatrixTest/KernelTest/matrix.cu
@@ -9,7 +9,10 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"

- void verify_result(const std::vector<int> &a, const std::vector<int> &b, const std::vector<int> &c, int N) {
+ void verify_result(const std::vector<int> &a,
+ const std::vector<int> &b,
+ const std::vector<int> &c,
+ int N) {
for (int i{}; i < N; ++i) {
for (int j{}; j < N; ++j) {
int tmp{};
@@ -25,8 +28,12 @@ void verify_result(const std::vector<int> &a, const std::vector<int> &b, const s
std::cout << "Success\n";
}

- void verify_result(
- const std::vector<int> &a, const std::vector<int> &b, const std::vector<int> &c, int N, int K, int M) {
+ void verify_result(const std::vector<int> &a,
+ const std::vector<int> &b,
+ const std::vector<int> &c,
+ int N,
+ int K,
+ int M) {
for (int i{}; i < N; ++i) {
for (int j{}; j < M; ++j) {
int tmp{};
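Both verify_result overloads above recompute the product on the host and compare it against the GPU result: for an N x K matrix a times a K x M matrix b, element (i, j) of c must equal the dot product of row i of a with column j of b. A host-only sketch of that check, assuming row-major storage as the flattened indexing in the test suggests (illustrative, outside the doctest harness):

#include <cassert>
#include <vector>

// Reference check for a row-major N x K times K x M product.
void check_matmul(const std::vector<int>& a, const std::vector<int>& b,
                  const std::vector<int>& c, int N, int K, int M) {
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < M; ++j) {
      int tmp = 0;
      for (int k = 0; k < K; ++k) {
        tmp += a[i * K + k] * b[k * M + j];
      }
      assert(tmp == c[i * M + j]);
    }
  }
}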
6 changes: 4 additions & 2 deletions test/serial/ActivatorsTest/Activators.cc
@@ -34,7 +34,8 @@ TEST_CASE("Test the Elu functor") {
CHECK(e(0.) == 0.);
CHECK(e(std::numeric_limits<double>::max()) == std::numeric_limits<double>::max());
CHECK(e(-1) == 0.5 * (std::expm1(-1)));
- CHECK(e(-std::numeric_limits<double>::max()) == 0.5 * (std::expm1(-std::numeric_limits<double>::max())));
+ CHECK(e(-std::numeric_limits<double>::max()) ==
+ 0.5 * (std::expm1(-std::numeric_limits<double>::max())));
}

TEST_CASE("Test the Leaky Elu functor") {
@@ -43,6 +44,7 @@ TEST_CASE("Test the Leaky Elu functor") {
CHECK(le(0.) == 0.);
CHECK(le(1.) == 1.);
CHECK(le(std::numeric_limits<double>::max()) == std::numeric_limits<double>::max());
- CHECK(le(-std::numeric_limits<double>::max()) == -0.1 * std::numeric_limits<double>::max());
+ CHECK(le(-std::numeric_limits<double>::max()) ==
+ -0.1 * std::numeric_limits<double>::max());
CHECK(le(-1) == -0.1);
}
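The reflowed assertions pin down the behaviour under test: the Elu functor is the identity for non-negative inputs and alpha * expm1(x) for negative ones (here alpha = 0.5), while the leaky variant scales negative inputs by a fixed slope (here 0.1). A minimal sketch consistent with those checked values (one plausible reading of the tests, not the nnhep functor interface):

#include <cmath>

// ELU with scale alpha: x for x >= 0, alpha * (exp(x) - 1) otherwise,
// matching CHECK(e(-1) == 0.5 * std::expm1(-1)) above for alpha = 0.5.
double elu(double x, double alpha) {
  return x >= 0.0 ? x : alpha * std::expm1(x);
}

// Leaky behaviour consistent with le(-1) == -0.1 for a slope of 0.1.
double leaky(double x, double slope) {
  return x >= 0.0 ? x : slope * x;
}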
