From ed2dad1c897477d0ea752b94b48a2a2569c43f79 Mon Sep 17 00:00:00 2001
From: Simone Balducci <93096843+sbaldu@users.noreply.github.com>
Date: Sat, 6 Jan 2024 21:32:54 +0100
Subject: [PATCH] Improve access operator (#24)

* Separate `Matrix::get` in const and non-const
* Change `Matrix::get` into `operator()`
* Update definitions
* Update tests
---
 src/cuda/DataFormats/Matrix.h                 | 23 +++++---
 src/cuda/DataFormats/VectorKernels.h          |  5 +-
 src/cuda/include/ErrorFunction.h              |  9 ++-
 src/cuda/include/Layer.h                      |  9 ++-
 src/cuda/include/Network.h                    | 35 ++++++++----
 src/nnhep/DataFormats/Matrix.hpp              | 57 +++++++++++++------
 src/nnhep/headers/Activators.hpp              | 12 +++-
 src/nnhep/headers/ErrorFunction.hpp           | 14 +++--
 src/nnhep/headers/Layer.hpp                   |  9 ++-
 src/nnhep/headers/Network.hpp                 | 38 ++++++++-----
 test/cuda/MatrixTest/KernelTest/matrix.cu     | 13 ++++-
 test/serial/ActivatorsTest/Activators.cc      |  6 +-
 .../serial/MatrixTest/MatrixMultiplication.cc | 10 ++--
 test/serial/MatrixTest/MatrixSum.cc           |  8 +--
 test/serial/MatrixTest/MatrixTransposition.cc |  4 +-
 15 files changed, 170 insertions(+), 82 deletions(-)

diff --git a/src/cuda/DataFormats/Matrix.h b/src/cuda/DataFormats/Matrix.h
index de2351b..808d3f2 100644
--- a/src/cuda/DataFormats/Matrix.h
+++ b/src/cuda/DataFormats/Matrix.h
@@ -65,10 +65,12 @@ class Matrix {
   friend __host__ Matrix operator*(const Matrix& m1, const Matrix& m2);
 
   template
-  friend __host__ std::vector operator*(const Matrix& matrix, const std::vector& vec);
+  friend __host__ std::vector operator*(const Matrix& matrix,
+                                        const std::vector& vec);
 
   template E>
-  friend __host__ std::vector operator*(const Matrix& matrix, const std::vector& vec);
+  friend __host__ std::vector operator*(const Matrix& matrix,
+                                        const std::vector& vec);
 
   __host__ Matrix& operator+=(const Matrix& other);
   template
@@ -90,7 +92,10 @@ class Matrix {
 
 template
 Matrix::Matrix(int n_rows, int n_cols)
-    : m_nrows{n_rows}, m_ncols{n_cols}, m_size{n_rows * n_cols}, m_data(n_rows * n_cols) {}
+    : m_nrows{n_rows},
+      m_ncols{n_cols},
+      m_size{n_rows * n_cols},
+      m_data(n_rows * n_cols) {}
 
 template
 template
@@ -270,7 +275,8 @@ Matrix operator*(const Matrix& m1, const Matrix& m2) {
       throw(0);
    }
   } catch (int num) {
-    std::cout << "The two matrices can't be multiplied because their dimensions are not compatible. \n";
+    std::cout << "The two matrices can't be multiplied because their dimensions are not "
+                 "compatible. \n";
   }
 
   Matrix result(N, M);
@@ -302,7 +308,8 @@ Matrix operator*(const Matrix& m1, const Matrix& m2) {
   cudaMemcpy(m_b.data, m2.data().data(), size_b, cudaMemcpyHostToDevice);
   const size_t shared_size{2 * block_size * block_size * sizeof(T)};
   matrix_multiply<<>>(m_a, m_b, m_c, block_size);
-  cudaMemcpy(const_cast(result.data().data()), m_c.data, size_c, cudaMemcpyDeviceToHost);
+  cudaMemcpy(
+      const_cast(result.data().data()), m_c.data, size_c, cudaMemcpyDeviceToHost);
 
   cudaFree(d_a);
   cudaFree(d_b);
@@ -322,7 +329,8 @@ Matrix operator*(const Matrix& m1, const Matrix& m2) {
       throw(0);
    }
   } catch (int num) {
-    std::cout << "The two matrices can't be multiplied because their dimensions are not compatible. \n";
+    std::cout << "The two matrices can't be multiplied because their dimensions are not "
+                 "compatible. \n";
   }
 
   Matrix result(N, M);
@@ -355,7 +363,8 @@ Matrix operator*(const Matrix& m1, const Matrix& m2) {
   cudaMemcpy(m_b.data, m2.data().data(), size_b, cudaMemcpyHostToDevice);
   const size_t shared_size{2 * block_size * block_size * sizeof(T)};
   matrix_multiply<<>>(m_a, m_b, m_c, block_size);
-  cudaMemcpy(const_cast(result.data().data()), m_c.data, size_c, cudaMemcpyDeviceToHost);
+  cudaMemcpy(
+      const_cast(result.data().data()), m_c.data, size_c, cudaMemcpyDeviceToHost);
 
   cudaFree(d_a);
   cudaFree(d_b);
diff --git a/src/cuda/DataFormats/VectorKernels.h b/src/cuda/DataFormats/VectorKernels.h
index e1ce98a..4f16a76 100644
--- a/src/cuda/DataFormats/VectorKernels.h
+++ b/src/cuda/DataFormats/VectorKernels.h
@@ -73,7 +73,10 @@ __global__ void vec_divide(T* a, E constant, int n) {
 }
 
 template
-__global__ void matrix_multiply(const matrix_t a, const matrix_t b, matrix_t c, int block_size) {
+__global__ void matrix_multiply(const matrix_t a,
+                                const matrix_t b,
+                                matrix_t c,
+                                int block_size) {
   // Allocate memory on shared memory
   extern __shared__ int shared[];
   int* s_a{shared};
diff --git a/src/cuda/include/ErrorFunction.h b/src/cuda/include/ErrorFunction.h
index 7fe7bdf..bd17c66 100644
--- a/src/cuda/include/ErrorFunction.h
+++ b/src/cuda/include/ErrorFunction.h
@@ -14,7 +14,8 @@ using shared = std::shared_ptr;
 
 template typename Activator>
 struct MeanSquaredError {
-  __host__ double operator()(const std::vector& node_values, const std::vector& expected_values) {
+  __host__ double operator()(const std::vector& node_values,
+                             const std::vector& expected_values) {
     double error{};
     int N{node_values.size()};
     for (int node_index{}; node_index < N; ++node_index) {
@@ -35,7 +36,8 @@ struct MeanSquaredError {
       int N{layers[layer_id]->size()};
       std::vector delta(N);
       for (int node_index{}; node_index < N; ++node_index) {
-        delta[node_index] = (*layers[layer_id])[node_index] - static_cast(expected_values[node_index]);
+        delta[node_index] =
+            (*layers[layer_id])[node_index] - static_cast(expected_values[node_index]);
       }
 
       return delta;
@@ -45,7 +47,8 @@ struct MeanSquaredError {
       std::vector delta(N);
 
       for (int node_index{}; node_index < N; ++node_index) {
-        std::vector previous_delta{grad(expected_values, layer_id + 1, layers, weights)};
+        std::vector previous_delta{
+            grad(expected_values, layer_id + 1, layers, weights)};
         delta[node_index] = act.grad((*layers[layer_id])[node_index]) *
                             (weights[layer_id]->transpose() * previous_delta)[node_index];
       }
diff --git a/src/cuda/include/Layer.h b/src/cuda/include/Layer.h
index bfbb9b9..41ba79b 100644
--- a/src/cuda/include/Layer.h
+++ b/src/cuda/include/Layer.h
@@ -47,7 +47,8 @@ template
 Layer::Layer(int n_nodes) : m_nodes(n_nodes), n_nodes{n_nodes} {}
 
 template
-Layer::Layer(std::vector nodes) : m_nodes{std::move(nodes)}, n_nodes{m_nodes.size()} {}
+Layer::Layer(std::vector nodes)
+    : m_nodes{std::move(nodes)}, n_nodes{m_nodes.size()} {}
 
 template
 Layer::Layer(std::stringstream& stream) {
@@ -77,7 +78,8 @@ void Layer::load(std::stringstream& stream) {
       }
       ++node_index;
     } catch (int num) {
-      std::cout << "The data provided exceedes the number of nodes expected for the layer\n";
+      std::cout
+          << "The data provided exceedes the number of nodes expected for the layer\n";
     }
   }
 }
@@ -90,7 +92,8 @@ void Layer::set_node_data(int i, T value) {
     }
     m_nodes[i] = value;
   } catch (...) {
-    std::cout << "The index " << i << " is larger than the number of nodes in the layer\n";
+    std::cout << "The index " << i
+              << " is larger than the number of nodes in the layer\n";
   }
 }
diff --git a/src/cuda/include/Network.h b/src/cuda/include/Network.h
index 9f5ba83..21183de 100644
--- a/src/cuda/include/Network.h
+++ b/src/cuda/include/Network.h
@@ -54,7 +54,9 @@ class Network {
   void set_bias_data(int layer_id, std::vector bias_vector);
   void set_bias_data(int layer_id, shared> bias_vector_ptr);
 
-  std::vector forward_propatation(shared>, shared>, shared>);
+  std::vector forward_propatation(shared>,
+                                  shared>,
+                                  shared>);
   void forward_propatation();
 
   template
@@ -94,7 +96,8 @@ Network::Network(const std::vector& nodes_per_layer)
       m_bias(n_layers - 1) {
   for (int i{}; i < n_layers - 1; ++i) {
     m_layers[i] = std::make_shared>(nodes_per_layer[i]);
-    m_weights[i] = std::make_shared>(nodes_per_layer[i + 1], nodes_per_layer[i]);
+    m_weights[i] =
+        std::make_shared>(nodes_per_layer[i + 1], nodes_per_layer[i]);
     m_bias[i] = std::make_shared>(nodes_per_layer[i + 1]);
 
     // Generate random weight matrices
@@ -160,7 +163,8 @@ template
          typename Act> typename Loss>
-void Network::set_matrix_data(int layer_id, Matrix weight_matrix) {
+void Network::set_matrix_data(int layer_id,
+                              Matrix weight_matrix) {
   m_weights[layer_id] = std::make_shared>(weight_matrix);
 }
 
@@ -170,7 +174,8 @@ template
          typename Act> typename Loss>
-void Network::set_matrix_data(int layer_id, shared> weight_matrix_ptr) {
+void Network::set_matrix_data(
+    int layer_id, shared> weight_matrix_ptr) {
   m_weights[layer_id] = weight_matrix_ptr;
 }
 
@@ -180,7 +185,8 @@ template
          typename Act> typename Loss>
-void Network::set_bias_data(int layer_id, std::vector bias_vector) {
+void Network::set_bias_data(int layer_id,
+                            std::vector bias_vector) {
   m_bias[layer_id] = std::make_shared>(bias_vector);
 }
 
@@ -190,7 +196,8 @@ template
          typename Act> typename Loss>
-void Network::set_bias_data(int layer_id, shared> bias_vector_ptr) {
+void Network::set_bias_data(
+    int layer_id, shared> bias_vector_ptr) {
   m_bias[layer_id] = bias_vector_ptr;
 }
 
@@ -200,9 +207,10 @@ template
          typename Act> typename Loss>
-std::vector Network::forward_propatation(shared> layer,
-                                         shared> weight_matrix,
-                                         shared> bias_vector) {
+std::vector Network::forward_propatation(
+    shared> layer,
+    shared> weight_matrix,
+    shared> bias_vector) {
   std::vector next_layer_nodes{*weight_matrix * layer->nodes() + *bias_vector};
 
   return Activator()(next_layer_nodes);
@@ -216,7 +224,8 @@ template
 void Network::forward_propatation() {
   for (int i{}; i < n_layers - 1; ++i) {
-    std::vector new_layer_data{forward_propatation(m_layers[i], m_weights[i], m_bias[i])};
+    std::vector new_layer_data{
+        forward_propatation(m_layers[i], m_weights[i], m_bias[i])};
     m_layers[i + 1]->set_node_data(new_layer_data);
   }
 }
@@ -245,7 +254,8 @@ template
          typename Act> typename Loss>
 template
-void Network::back_propagation(double eta, const std::vector& target) {
+void Network::back_propagation(double eta,
+                               const std::vector& target) {
   for (int layer_id{n_layers - 2}; layer_id >= 0; --layer_id) {
     back_propagation(target, layer_id, eta);
   }
 }
@@ -304,7 +314,8 @@ void Network::import_network(const std::string& filepath)
       bias.push_back(std::stod(value));
     }
 
-    m_weights[i] = std::make_shared>(m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
+    m_weights[i] = std::make_shared>(
+        m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
     m_bias[i] = std::make_shared>(bias);
   }
 }
diff --git a/src/nnhep/DataFormats/Matrix.hpp b/src/nnhep/DataFormats/Matrix.hpp
index 80b56a3..893b4a2 100644
--- a/src/nnhep/DataFormats/Matrix.hpp
+++ b/src/nnhep/DataFormats/Matrix.hpp
@@ -88,7 +88,16 @@ namespace nnhep {
     /// @param i The row index of the element to get
     /// @param j The column index of the element to get
     /// @return The element of the matrix
-    inline const T get(int i, int j) const;
+    ///
+    /// @note returns by reference
+    inline T& operator()(int i, int j);
+    /// @brief Get an element of the matrix
+    /// @param i The row index of the element to get
+    /// @param j The column index of the element to get
+    /// @return The element of the matrix
+    ///
+    /// @note returns by const reference
+    inline const T& operator()(int i, int j) const;
 
     ///
     inline Matrix transpose();
@@ -193,7 +202,10 @@ namespace nnhep {
   template
   Matrix::Matrix(int n_rows, int n_cols)
-      : m_nrows{n_rows}, m_ncols{n_cols}, m_size{n_rows * n_cols}, m_data(n_rows * n_cols) {}
+      : m_nrows{n_rows},
+        m_ncols{n_cols},
+        m_size{n_rows * n_cols},
+        m_data(n_rows * n_cols) {}
 
   template
   template
@@ -276,7 +288,12 @@ namespace nnhep {
   }
 
   template
-  const T Matrix::get(int i, int j) const {
+  T& Matrix::operator()(int i, int j) {
+    return m_data[j + m_ncols * i];
+  }
+
+  template
+  const T& Matrix::operator()(int i, int j) const {
     return m_data[j + m_ncols * i];
   }
 
@@ -286,7 +303,7 @@ namespace nnhep {
 
     for (int i{}; i < this->m_nrows; ++i) {
       for (int j{}; j < this->m_ncols; ++j) {
-        matrix.set_data(j, i, this->get(i, j));
+        matrix.set_data(j, i, (*this)(i, j));
       }
     }
 
@@ -335,14 +352,15 @@ namespace nnhep {
         throw(0);
       }
     } catch (int num) {
-      std::cout << "The two matrices can't be multiplied because their dimensions are not compatible. \n";
+      std::cout << "The two matrices can't be multiplied because their dimensions are "
+                   "not compatible. \n";
     }
 
     for (int i{}; i < m1.m_nrows; ++i) {
       for (int j{}; j < m2.m_ncols; ++j) {
         T sum{};
         for (int k{}; k < m1.m_ncols; ++k) {
-          sum += m1.get(i, k) * m2.get(k, j);
+          sum += m1(i, k) * m2(k, j);
         }
         result.set_data(i, j, sum);
       }
@@ -360,14 +378,15 @@ namespace nnhep {
        throw(0);
      }
     } catch (int num) {
-      std::cout << "The two matrices can't be multiplied because their dimensions are not compatible. \n";
+      std::cout << "The two matrices can't be multiplied because their dimensions are "
+                   "not compatible. \n";
     }
 
     for (int i{}; i < m1.m_nrows; ++i) {
       for (int j{}; j < m2.m_ncols; ++j) {
         T sum{};
         for (int k{}; k < m1.m_ncols; ++k) {
-          sum += m1.get(i, k) * m2.get(k, j);
+          sum += m1(i, k) * m2(k, j);
         }
         result.set_data(i, j, sum);
       }
@@ -447,10 +466,13 @@ namespace nnhep {
   template
   template
   Matrix& Matrix::operator*=(E constant) {
-    std::transform(this->m_data.cbegin(), this->m_data.cend(), this->m_data.begin(), [constant](auto x) {
-      x *= constant;
-      return x;
-    });
+    std::transform(this->m_data.cbegin(),
+                   this->m_data.cend(),
+                   this->m_data.begin(),
+                   [constant](auto x) {
+                     x *= constant;
+                     return x;
+                   });
 
     return *this;
   }
@@ -458,10 +480,13 @@ namespace nnhep {
   template
   template
   Matrix& Matrix::operator/=(E constant) {
-    std::transform(this->m_data.cbegin(), this->m_data.cend(), this->m_data.begin(), [constant](auto x) {
-      x /= constant;
-      return x;
-    });
+    std::transform(this->m_data.cbegin(),
+                   this->m_data.cend(),
+                   this->m_data.begin(),
+                   [constant](auto x) {
+                     x /= constant;
+                     return x;
+                   });
 
     return *this;
   }
diff --git a/src/nnhep/headers/Activators.hpp b/src/nnhep/headers/Activators.hpp
index ead057a..46ff9d6 100644
--- a/src/nnhep/headers/Activators.hpp
+++ b/src/nnhep/headers/Activators.hpp
@@ -76,7 +76,9 @@ namespace nnhep {
     /// @return The derivative of the identity function.
     ///
     /// @details The derivative of the identity function is always 0.
-    std::vector grad(shared> layer) { return std::vector(layer->size(), 0.); }
+    std::vector grad(shared> layer) {
+      return std::vector(layer->size(), 0.);
+    }
     /// @brief The derivative of the identity function.
     /// @param node_values The values of the nodes.
     /// @return The derivative of the identity function.
@@ -131,7 +133,9 @@ namespace nnhep {
     /// @brief The derivative of the linear activation function.
     /// @param layer The layer of nodes.
     /// @return The derivative of the linear activation function.
-    std::vector grad(shared> layer) { return std::vector(layer->size(), 1); }
+    std::vector grad(shared> layer) {
+      return std::vector(layer->size(), 1);
+    }
     /// @brief The derivative of the linear activation function.
     /// @param node_values The values of the nodes.
     /// @return The derivative of the linear activation function.
@@ -188,7 +192,9 @@ namespace nnhep {
     /// @brief The derivative of the sigmoid activation function.
     /// @param activated_value The value of the activated node.
     /// @return The derivative of the sigmoid activation function.
-    double grad(double activated_value) { return activated_value * (1 - activated_value); }
+    double grad(double activated_value) {
+      return activated_value * (1 - activated_value);
+    }
     /// @brief The derivative of the sigmoid activation function.
     /// @param layer The layer of nodes.
     /// @return The derivative of the sigmoid activation function.
diff --git a/src/nnhep/headers/ErrorFunction.hpp b/src/nnhep/headers/ErrorFunction.hpp
index 9319a4d..1177c04 100644
--- a/src/nnhep/headers/ErrorFunction.hpp
+++ b/src/nnhep/headers/ErrorFunction.hpp
@@ -37,7 +37,8 @@ namespace nnhep {
     /// @details The error is calculated by taking the mean of the squared
     /// difference between the node values and the expected values.
     template
-    double operator()(const std::vector& node_values, const std::vector& expected_values) {
+    double operator()(const std::vector& node_values,
+                      const std::vector& expected_values) {
       double error{};
       const size_t N{node_values.size()};
       for (size_t node_index{}; node_index < N; ++node_index) {
@@ -66,7 +67,8 @@ namespace nnhep {
         int N{layers[layer_id]->size()};
         std::vector delta(N);
         for (int node_index{}; node_index < N; ++node_index) {
-          delta[node_index] = (*layers[layer_id])[node_index] - static_cast(expected_values[node_index]);
+          delta[node_index] = (*layers[layer_id])[node_index] -
+                              static_cast(expected_values[node_index]);
         }
 
         return delta;
@@ -76,9 +78,11 @@ namespace nnhep {
         std::vector delta(N);
 
         for (int node_index{}; node_index < N; ++node_index) {
-          std::vector previous_delta{grad(expected_values, layer_id + 1, layers, weights)};
-          delta[node_index] = act.grad((*layers[layer_id])[node_index]) *
-                              (weights[layer_id]->transpose() * previous_delta)[node_index];
+          std::vector previous_delta{
+              grad(expected_values, layer_id + 1, layers, weights)};
+          delta[node_index] =
+              act.grad((*layers[layer_id])[node_index]) *
+              (weights[layer_id]->transpose() * previous_delta)[node_index];
         }
 
         return delta;
diff --git a/src/nnhep/headers/Layer.hpp b/src/nnhep/headers/Layer.hpp
index d3fb7fd..e8f6bf7 100644
--- a/src/nnhep/headers/Layer.hpp
+++ b/src/nnhep/headers/Layer.hpp
@@ -80,7 +80,8 @@ namespace nnhep {
   Layer::Layer(int n_nodes) : m_nodes(n_nodes), n_nodes{n_nodes} {}
 
   template
-  Layer::Layer(std::vector nodes) : m_nodes{std::move(nodes)}, n_nodes{m_nodes.size()} {}
+  Layer::Layer(std::vector nodes)
+      : m_nodes{std::move(nodes)}, n_nodes{m_nodes.size()} {}
 
   template
   Layer::Layer(std::stringstream& stream) {
@@ -110,7 +111,8 @@ namespace nnhep {
         }
         ++node_index;
       } catch (int num) {
-        std::cout << "The data provided exceedes the number of nodes expected for the layer\n";
+        std::cout
+            << "The data provided exceedes the number of nodes expected for the layer\n";
       }
     }
   }
@@ -123,7 +125,8 @@ namespace nnhep {
       }
       m_nodes[i] = value;
     } catch (...) {
-      std::cout << "The index " << i << " is larger than the number of nodes in the layer\n";
+      std::cout << "The index " << i
+                << " is larger than the number of nodes in the layer\n";
     }
   }
diff --git a/src/nnhep/headers/Network.hpp b/src/nnhep/headers/Network.hpp
index 6d83c43..08de76e 100644
--- a/src/nnhep/headers/Network.hpp
+++ b/src/nnhep/headers/Network.hpp
@@ -102,7 +102,9 @@ namespace nnhep {
     /// @param weight_matrix The weight matrix to use
     /// @param bias_vector The bias vector to use
     /// @return The values of the next layer
-    std::vector forward_propatation(shared>, shared>, shared>);
+    std::vector forward_propatation(shared>,
+                                    shared>,
+                                    shared>);
     /// @brief Forward propagate the values of the network
     ///
     /// @details This function is used to forward propagate the values of the
@@ -185,7 +187,8 @@ namespace nnhep {
         m_bias(n_layers - 1) {
     for (int i{}; i < n_layers - 1; ++i) {
       m_layers[i] = std::make_shared>(nodes_per_layer[i]);
-      m_weights[i] = std::make_shared>(nodes_per_layer[i + 1], nodes_per_layer[i]);
+      m_weights[i] =
+          std::make_shared>(nodes_per_layer[i + 1], nodes_per_layer[i]);
       m_bias[i] = std::make_shared>(nodes_per_layer[i + 1]);
 
       // Generate random weight matrices
@@ -231,7 +234,8 @@ namespace nnhep {
             typename Activator,
             template typename Act> typename Loss>
-  const shared> Network::weight_matrix(int layer_id) const {
+  const shared> Network::weight_matrix(
+      int layer_id) const {
     return m_weights[layer_id];
   }
 
@@ -251,7 +255,8 @@ namespace nnhep {
             typename Activator,
             template typename Act> typename Loss>
-  void Network::set_matrix_data(int layer_id, Matrix weight_matrix) {
+  void Network::set_matrix_data(int layer_id,
+                                Matrix weight_matrix) {
     m_weights[layer_id] = std::make_shared>(weight_matrix);
   }
 
@@ -261,7 +266,8 @@ namespace nnhep {
             typename Activator,
             template typename Act> typename Loss>
-  void Network::set_matrix_data(int layer_id, shared> weight_matrix_ptr) {
+  void Network::set_matrix_data(
+      int layer_id, shared> weight_matrix_ptr) {
     m_weights[layer_id] = weight_matrix_ptr;
   }
 
@@ -271,7 +277,8 @@ namespace nnhep {
             typename Activator,
             template typename Act> typename Loss>
-  void Network::set_bias_data(int layer_id, std::vector bias_vector) {
+  void Network::set_bias_data(int layer_id,
+                              std::vector bias_vector) {
     m_bias[layer_id] = std::make_shared>(bias_vector);
   }
 
@@ -281,7 +288,8 @@ namespace nnhep {
             typename Activator,
             template typename Act> typename Loss>
-  void Network::set_bias_data(int layer_id, shared> bias_vector_ptr) {
+  void Network::set_bias_data(
+      int layer_id, shared> bias_vector_ptr) {
     m_bias[layer_id] = bias_vector_ptr;
   }
 
@@ -291,9 +299,10 @@ namespace nnhep {
             typename Activator,
             template typename Act> typename Loss>
-  std::vector Network::forward_propatation(shared> layer,
-                                           shared> weight_matrix,
-                                           shared> bias_vector) {
+  std::vector Network::forward_propatation(
+      shared> layer,
+      shared> weight_matrix,
+      shared> bias_vector) {
     std::vector next_layer_nodes{*weight_matrix * layer->nodes() + *bias_vector};
 
     return Activator()(next_layer_nodes);
@@ -307,7 +316,8 @@ namespace nnhep {
           typename Loss>
   void Network::forward_propatation() {
     for (int i{}; i < n_layers - 1; ++i) {
-      std::vector new_layer_data{forward_propatation(m_layers[i], m_weights[i], m_bias[i])};
+      std::vector new_layer_data{
+          forward_propatation(m_layers[i], m_weights[i], m_bias[i])};
       m_layers[i + 1]->set_node_data(new_layer_data);
     }
   }
@@ -336,7 +346,8 @@ namespace nnhep {
            template typename Act> typename Loss>
   template
-  void Network::back_propagation(double eta, const std::vector& target) {
+  void Network::back_propagation(double eta,
+                                 const std::vector& target) {
     for (int layer_id{n_layers - 2}; layer_id >= 0; --layer_id) {
       back_propagation(target, layer_id, eta);
     }
   }
@@ -395,7 +406,8 @@ namespace nnhep {
         bias.push_back(std::stod(value));
       }
 
-      m_weights[i] = std::make_shared>(m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
+      m_weights[i] = std::make_shared>(
+          m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
       m_bias[i] = std::make_shared>(bias);
     }
   }
diff --git a/test/cuda/MatrixTest/KernelTest/matrix.cu b/test/cuda/MatrixTest/KernelTest/matrix.cu
index b5267dc..09c677c 100644
--- a/test/cuda/MatrixTest/KernelTest/matrix.cu
+++ b/test/cuda/MatrixTest/KernelTest/matrix.cu
@@ -9,7 +9,10 @@
 #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
 #include "doctest.h"
 
-void verify_result(const std::vector &a, const std::vector &b, const std::vector &c, int N) {
+void verify_result(const std::vector &a,
+                   const std::vector &b,
+                   const std::vector &c,
+                   int N) {
   for (int i{}; i < N; ++i) {
     for (int j{}; j < N; ++j) {
       int tmp{};
@@ -25,8 +28,12 @@ void verify_result(const std::vector &a, const std::vector &b, const s
   std::cout << "Success\n";
 }
 
-void verify_result(
-    const std::vector &a, const std::vector &b, const std::vector &c, int N, int K, int M) {
+void verify_result(const std::vector &a,
+                   const std::vector &b,
+                   const std::vector &c,
+                   int N,
+                   int K,
+                   int M) {
   for (int i{}; i < N; ++i) {
     for (int j{}; j < M; ++j) {
       int tmp{};
diff --git a/test/serial/ActivatorsTest/Activators.cc b/test/serial/ActivatorsTest/Activators.cc
index 7c73d1f..d68008a 100644
--- a/test/serial/ActivatorsTest/Activators.cc
+++ b/test/serial/ActivatorsTest/Activators.cc
@@ -34,7 +34,8 @@ TEST_CASE("Test the Elu functor") {
   CHECK(e(0.) == 0.);
   CHECK(e(std::numeric_limits::max()) == std::numeric_limits::max());
   CHECK(e(-1) == 0.5 * (std::expm1(-1)));
-  CHECK(e(-std::numeric_limits::max()) == 0.5 * (std::expm1(-std::numeric_limits::max())));
+  CHECK(e(-std::numeric_limits::max()) ==
+        0.5 * (std::expm1(-std::numeric_limits::max())));
 }
 
 TEST_CASE("Test the Leaky Elu functor") {
@@ -43,6 +44,7 @@ TEST_CASE("Test the Leaky Elu functor") {
   CHECK(le(0.) == 0.);
   CHECK(le(1.) == 1.);
   CHECK(le(std::numeric_limits::max()) == std::numeric_limits::max());
-  CHECK(le(-std::numeric_limits::max()) == -0.1 * std::numeric_limits::max());
+  CHECK(le(-std::numeric_limits::max()) ==
+        -0.1 * std::numeric_limits::max());
   CHECK(le(-1) == -0.1);
 }
diff --git a/test/serial/MatrixTest/MatrixMultiplication.cc b/test/serial/MatrixTest/MatrixMultiplication.cc
index de958ea..739ba10 100644
--- a/test/serial/MatrixTest/MatrixMultiplication.cc
+++ b/test/serial/MatrixTest/MatrixMultiplication.cc
@@ -20,7 +20,7 @@ TEST_CASE("Test scalar product between two vector matrices") {
 
   nnhep::Matrix product{m1 * m2};
 
-  CHECK(product.get(0, 0) == 735);
+  CHECK(product(0, 0) == 735);
 }
 
 TEST_CASE("Test matrix product between two 2x2 matrices") {
@@ -37,10 +37,10 @@ TEST_CASE("Test matrix product between two 2x2 matrices") {
   nnhep::Matrix product(2, 2);
   product = m1 * m2;
 
-  CHECK(product.get(0, 0) == 14);
-  CHECK(product.get(1, 0) == 30);
-  CHECK(product.get(0, 1) == 20);
-  CHECK(product.get(1, 1) == 44);
+  CHECK(product(0, 0) == 14);
+  CHECK(product(1, 0) == 30);
+  CHECK(product(0, 1) == 20);
+  CHECK(product(1, 1) == 44);
   CHECK(product[0] == 14);
   CHECK(product[1] == 20);
   CHECK(product[2] == 30);
diff --git a/test/serial/MatrixTest/MatrixSum.cc b/test/serial/MatrixTest/MatrixSum.cc
index 23f01dd..70e255d 100644
--- a/test/serial/MatrixTest/MatrixSum.cc
+++ b/test/serial/MatrixTest/MatrixSum.cc
@@ -39,10 +39,10 @@ TEST_CASE("Test matrix product between two 2x2 matrices") {
   nnhep::Matrix sum_matrix(2, 2);
   sum_matrix = m1 + m2;
 
-  CHECK(sum_matrix.get(0, 0) == 3);
-  CHECK(sum_matrix.get(1, 0) == 9);
-  CHECK(sum_matrix.get(0, 1) == 6);
-  CHECK(sum_matrix.get(1, 1) == 12);
+  CHECK(sum_matrix(0, 0) == 3);
+  CHECK(sum_matrix(1, 0) == 9);
+  CHECK(sum_matrix(0, 1) == 6);
+  CHECK(sum_matrix(1, 1) == 12);
   CHECK(sum_matrix[0] == 3);
   CHECK(sum_matrix[1] == 6);
   CHECK(sum_matrix[2] == 9);
diff --git a/test/serial/MatrixTest/MatrixTransposition.cc b/test/serial/MatrixTest/MatrixTransposition.cc
index 63be09d..263c3e1 100644
--- a/test/serial/MatrixTest/MatrixTransposition.cc
+++ b/test/serial/MatrixTest/MatrixTransposition.cc
@@ -42,7 +42,7 @@ TEST_CASE("Test the transposition of a square matrix") {
   CHECK(m2.ncols() == 4);
   for (int i{}; i < m2.nrows(); ++i) {
     for (int j{}; j < m2.ncols(); ++j) {
-      CHECK(m2.get(i, j) == m1.get(j, i));
+      CHECK(m2(i, j) == m1(j, i));
     }
   }
 }
@@ -62,7 +62,7 @@ TEST_CASE("Test the transposition of a rectangular matrix") {
   CHECK(m2.ncols() == 2);
   for (int i{}; i < m2.nrows(); ++i) {
     for (int j{}; j < m2.ncols(); ++j) {
-      CHECK(m2.get(i, j) == m1.get(j, i));
+      CHECK(m2(i, j) == m1(j, i));
    }
  }
}