Commit

Fix typo; replace store_type with store_t.
hummingtree committed Nov 12, 2024
1 parent 53eb2ae commit d0da823
Showing 12 changed files with 20 additions and 20 deletions.
4 changes: 2 additions & 2 deletions include/color_spinor_field_order.h
@@ -472,7 +472,7 @@ namespace quda
*/
template <typename Float, typename storeFloat, bool block_float_, typename norm_t> struct fieldorder_wrapper {
using value_type = Float; /**< Compute type */
- using store_type = storeFloat; /**< Storage type */
+ using store_t = storeFloat; /**< Storage type */
complex<storeFloat> *v; /**< Field memory address this wrapper encompasses */
const int idx; /**< Index into field */
private:
@@ -858,7 +858,7 @@ namespace quda
static constexpr int nSpin = nSpin_;
static constexpr int nColor = nColor_;

- using store_type = storeFloat;
+ using store_t = storeFloat;

field<Float, storeFloat, fixed, block_float> v;
unsigned int volumeCB = 0;
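For reference, the renamed alias is the hook generic kernels use to recover the storage type from an accessor. A minimal, host-compilable sketch of that pattern, with placeholder names rather than the real QUDA classes:

    #include <type_traits>

    // Hypothetical wrapper exposing the aliases touched by this commit.
    template <typename Float, typename storeFloat> struct wrapper_sketch {
      using value_type = Float;   // compute type
      using store_t = storeFloat; // storage type (was store_type before this commit)
    };

    // Generic code recovers the storage type through the alias.
    template <typename Accessor> constexpr bool uses_fixed_point()
    {
      using store_t = typename Accessor::store_t;
      return !std::is_floating_point_v<store_t>; // e.g. short or int8_t storage is fixed point
    }

    static_assert(uses_fixed_point<wrapper_sketch<float, short>>(), "short storage is fixed point");
    static_assert(!uses_fixed_point<wrapper_sketch<float, float>>(), "float storage is not");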
2 changes: 1 addition & 1 deletion include/gauge_field_order.h
@@ -170,7 +170,7 @@ namespace quda {
template <typename Float, typename storeFloat>
struct fieldorder_wrapper {
using value_type = Float;
- using store_type = storeFloat;
+ using store_t = storeFloat;
complex<storeFloat> *v;
const unsigned int idx;

8 changes: 4 additions & 4 deletions include/kernels/dslash_coarse_mma.cuh
@@ -257,7 +257,7 @@ namespace quda
constexpr bool a_dagger = false;
constexpr bool b_dagger = false;

- using store_b_ghost_t = complex<typename decltype(b)::store_type>;
+ using store_b_ghost_t = complex<typename decltype(b)::store_t>;
auto smem_tmp_b_ghost = reinterpret_cast<store_b_ghost_t *>(smem_tmp_b);

__syncthreads();
@@ -303,7 +303,7 @@ namespace quda

using a_wrapper_t = decltype(arg.Y(0, 0, 0, 0, 0));
using b_wrapper_t = decltype(arg.halo.Ghost(0, 0, 0, 0, 0, 0, 0));
- using store_b_ghost_t = complex<typename b_wrapper_t::store_type>;
+ using store_b_ghost_t = complex<typename b_wrapper_t::store_t>;
auto smem_tmp_b_ghost = reinterpret_cast<store_b_ghost_t *>(smem_tmp_b);
constexpr bool a_fixed = a_wrapper_t::fixed;
constexpr bool b_fixed = b_wrapper_t::fixed;
@@ -369,7 +369,7 @@ namespace quda
constexpr bool a_dagger = true;
constexpr bool b_dagger = false;

- using store_b_ghost_t = complex<typename decltype(b)::store_type>;
+ using store_b_ghost_t = complex<typename decltype(b)::store_t>;
auto smem_tmp_b_ghost = reinterpret_cast<store_b_ghost_t *>(smem_tmp_b);

__syncthreads();
@@ -415,7 +415,7 @@ namespace quda

using a_wrapper_t = decltype(arg.Y.Ghost(0, 0, 0, 0, 0));
using b_wrapper_t = decltype(arg.halo.Ghost(0, 0, 0, 0, 0, 0, 0));
- using store_b_ghost_t = complex<typename b_wrapper_t::store_type>;
+ using store_b_ghost_t = complex<typename b_wrapper_t::store_t>;
auto smem_tmp_b_ghost = reinterpret_cast<store_b_ghost_t *>(smem_tmp_b);
constexpr bool a_fixed = a_wrapper_t::fixed;
constexpr bool b_fixed = b_wrapper_t::fixed;
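The four hunks above all derive the storage type of the b/ghost operand from an accessor's store_t alias and then view a shared-memory scratch buffer through it. A simplified, host-side sketch of the same pattern (std::complex stands in for quda::complex, and the names are placeholders):

    #include <complex>

    template <typename wrapper_t> auto *view_as_ghost_storage(void *smem_tmp_b)
    {
      // complex storage element of the ghost operand, taken from the accessor's alias
      using store_b_ghost_t = std::complex<typename wrapper_t::store_t>;
      return reinterpret_cast<store_b_ghost_t *>(smem_tmp_b);
    }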
8 changes: 4 additions & 4 deletions include/kernels/restrictor_mma.cuh
@@ -128,8 +128,8 @@ namespace quda

int fine_spin = (Arg::fineSpin == 1 ? 0 : fine_spin_block + coarse_spin * Arg::spin_block_factor);
auto a_gmem = gmem(v_parity, x_fine_cb, fine_spin, fine_color, contiguous + contiguous_dim_offset);
- complex<typename gmem_obj_t::store_type> a[elements_per_thread];
- mma::batch_load_t<complex<typename gmem_obj_t::store_type>, elements_per_thread>::load(a, a_gmem.data());
+ complex<typename gmem_obj_t::store_t> a[elements_per_thread];
+ mma::batch_load_t<complex<typename gmem_obj_t::store_t>, elements_per_thread>::load(a, a_gmem.data());

int smem_m = contiguous;
int smem_k = (thread_idx * Arg::spin_block_factor + fine_spin_block) * Arg::fineColor + fine_color;
@@ -146,11 +146,11 @@
int coarse_spin, int contiguous_dim_offset, int aggregate_k_offset,
int *coarse_to_fine, const Arg &arg)
{
- constexpr int elements_per_thread = 16 / (sizeof(typename gmem_obj_t::store_type) * 2);
+ constexpr int elements_per_thread = 16 / (sizeof(typename gmem_obj_t::store_t) * 2);
static_assert(contiguous_dim % elements_per_thread == 0, "contiguous_dim %% elements_per_thread == 0");
float block_rescale_factor = 1.0f;

- using store_t = typename gmem_obj_t::store_type;
+ using store_t = typename gmem_obj_t::store_t;

if constexpr (rescale) {
float thread_max = 0;
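In the second hunk, the 16 is the width in bytes of a vectorized load and the factor of 2 counts the real and imaginary parts, so elements_per_thread is simply 16 / sizeof(complex<store_t>). A standalone check of that arithmetic:

    #include <cstdio>

    template <typename store_t> constexpr int elements_per_thread()
    {
      return 16 / (static_cast<int>(sizeof(store_t)) * 2); // 16-byte load / sizeof(complex<store_t>)
    }

    int main()
    {
      std::printf("short: %d per thread\n", elements_per_thread<short>()); // 16 / 4 = 4
      std::printf("float: %d per thread\n", elements_per_thread<float>()); // 16 / 8 = 2
      return 0;
    }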
2 changes: 1 addition & 1 deletion include/targets/cuda/mma_tensor_op/gemm.cuh
@@ -450,7 +450,7 @@ namespace quda
* @param a wrapper for operand A: the object needs to have the following methods:
* - .data() that returns the (global memory) address to which we are loading/storing
* - ::type the type for the computing type
- * - ::store_type the type for the storage type
+ * - ::store_t the type for the storage type
* - ::fixed a bool that indicates if the object uses the fixed-point format
* - .scale/scale_inv the scales for the fixed point format objects
* @param b similar to a
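The comment above documents the interface an operand wrapper must provide to the GEMM routine. A hypothetical type satisfying it, following the member names listed in the comment rather than any real QUDA class:

    #include <type_traits>

    template <typename compute_t, typename storage_t> struct operand_sketch {
      using type = compute_t;    // computing type
      using store_t = storage_t; // storage type (renamed from store_type in this commit)
      static constexpr bool fixed = !std::is_floating_point_v<storage_t>; // fixed-point storage?

      storage_t *ptr = nullptr;
      float scale = 1.0f;     // fixed-point scale
      float scale_inv = 1.0f; // and its inverse

      storage_t *data() const { return ptr; } // global-memory address for loads/stores
    };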
4 changes: 2 additions & 2 deletions include/targets/cuda/mma_tensor_op/gmem_loader.cuh
@@ -561,7 +561,7 @@ namespace quda
auto scale_inv = gmem.get_scale_inv();
constexpr bool fixed = GmemAccessor::fixed;

- using store_t = typename GmemAccessor::store_type;
+ using store_t = typename GmemAccessor::store_t;

constexpr bool x = (transpose == dagger);

@@ -720,7 +720,7 @@ namespace quda
{
constexpr bool x = (transpose == dagger);

- using store_t = typename GmemAccessor::store_type;
+ using store_t = typename GmemAccessor::store_t;

constexpr int n_stride = transpose == dagger ? block_y : block_z;
constexpr int m_stride = transpose == dagger ? block_z * batch : block_y * batch;
2 changes: 1 addition & 1 deletion include/targets/cuda/mma_tensor_op/hmma_m16n16k4_sm70.cuh
@@ -200,7 +200,7 @@ namespace quda
GmemOperandC &cc, const OperandC &op_c_real,
const OperandC &op_c_imag, op_t op)
{
- using store_t = typename GmemOperandC::store_type;
+ using store_t = typename GmemOperandC::store_t;

const int row = warp_row + wrm.row_offset + (wrm.quad_thread % 2);
const int col = warp_col + wrm.quad_col * 8 + (wrm.quad_thread / 2) * 2;
2 changes: 1 addition & 1 deletion include/targets/cuda/mma_tensor_op/hmma_m16n8k8_sm70.cuh
@@ -216,7 +216,7 @@ namespace quda
GmemOperandC &cc, const OperandC &op_c_real,
const OperandC &op_c_imag, op_t op)
{
- using store_t = typename GmemOperandC::store_type;
+ using store_t = typename GmemOperandC::store_t;

const int row = warp_row + wrm.row_offset + (wrm.quad_thread % 2);
const int col = warp_col + wrm.quad_col * 4 + (wrm.quad_thread / 2) * 2;
2 changes: 1 addition & 1 deletion include/targets/cuda/mma_tensor_op/simt.cuh
@@ -165,7 +165,7 @@ namespace quda
gmem_op_t &cc, const OperandC &op_c_real, const OperandC &op_c_imag,
op_t op)
{
- using store_t = typename gmem_op_t::store_type;
+ using store_t = typename gmem_op_t::store_t;
using complex_t = complex<store_t>;

auto *C = reinterpret_cast<complex_t *>(cc.data());
2 changes: 1 addition & 1 deletion include/targets/cuda/mma_tensor_op/smma_m16n8_sm80.cuh
@@ -392,7 +392,7 @@ namespace quda
gmem_op_t &cc, const OperandC &op_c_real, const OperandC &op_c_imag,
op_t op)
{
- using store_t = typename gmem_op_t::store_type;
+ using store_t = typename gmem_op_t::store_t;
using complex_t = complex<store_t>;

auto *C = reinterpret_cast<complex_t *>(cc.data());
2 changes: 1 addition & 1 deletion include/targets/cuda/mma_tensor_op/smma_m16n8k8_sm70.cuh
@@ -22,7 +22,7 @@ namespace quda

static constexpr bool do_rescale()
{
- return true; // false because we use FP16
+ return true; // true because we use FP16
}

static constexpr int MMA_M = 16;
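The corrected comment reflects that FP16 has a narrow dynamic range (maximum finite value 65504), which is presumably why this SM70 variant rescales blocks before the MMA. One plausible way such a rescale factor could be computed, shown purely for illustration and not taken from QUDA:

    // Map the largest magnitude in a block toward the top of the fp16 range;
    // the inverse factor is applied to the accumulator afterwards.
    inline float block_rescale_factor(float block_abs_max)
    {
      constexpr float fp16_max = 65504.0f;
      return block_abs_max > 0.0f ? fp16_max / block_abs_max : 1.0f;
    }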
2 changes: 1 addition & 1 deletion include/targets/cuda/tma_helper.hpp
@@ -84,7 +84,7 @@ namespace quda
if (CUDA_SUCCESS != error) {
const char *str;
cuGetErrorName(error, &str);
errorQuda("TMA descriptor creation returned %s\n", str);
errorQuda("TMA descriptor creation returned %s", str);
}

return {ret_value};
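errorQuda adds its own trailing formatting, which is presumably why the explicit \n is dropped here. The surrounding code is the standard CUDA driver-API error check; a standalone equivalent that uses plain stderr in place of the errorQuda macro:

    #include <cuda.h>
    #include <cstdio>

    inline void check_driver(CUresult error, const char *what)
    {
      if (CUDA_SUCCESS != error) {
        const char *str = nullptr;
        cuGetErrorName(error, &str); // e.g. "CUDA_ERROR_INVALID_VALUE"
        std::fprintf(stderr, "%s returned %s\n", what, str ? str : "unknown");
      }
    }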
