Use PAGE_SIZE instead of 4096 constant
kostis committed Jun 23, 2022
1 parent a3b9914 commit 8d5ed4b
Showing 4 changed files with 48 additions and 56 deletions.
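This commit removes the per-file definition const std::size_t page_size = 4096; from the tests and refers to a single shared PAGE_SIZE constant instead. A minimal sketch of the idea follows; the header name and the macro fallback are assumptions for illustration, since the actual definition of PAGE_SIZE is not part of this diff (it could equally be a constexpr constant or a build-system define):

    // page_size.hpp -- hypothetical header name, for illustration only.
    // Centralizing the page size means changing it (e.g. for huge pages or
    // a non-4K platform) touches one definition instead of every test file.
    #pragma once
    #include <cstddef>

    #ifndef PAGE_SIZE
    // Fallback when the build system does not pass -DPAGE_SIZE=<bytes>.
    #define PAGE_SIZE 4096UL
    #endif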
11 changes: 4 additions & 7 deletions tests/api.cpp
@@ -28,9 +28,6 @@ namespace dd = argo::data_distribution;
namespace mem = argo::mempools;
extern mem::global_memory_pool<>* default_global_mempool;

-/** @brief ArgoDSM page size */
-const std::size_t page_size = 4096;
-
/** @brief A "random" char constant */
constexpr char c_const = 'a';

@@ -79,15 +76,15 @@ TEST_F(APITest, GetHomeNode) {
char* end = start + argo::backend::global_size();

/* Touch an equal (+/- 1) number of pages per node */
-for(std::size_t s = page_size*node_id; s < alloc_size-1; s += page_size*num_nodes) {
+for(std::size_t s = PAGE_SIZE*node_id; s < alloc_size-1; s += PAGE_SIZE*num_nodes) {
tmp[s] = c_const;
}
argo::barrier();

/* Test that the number of pages owned by each node is equal (+/- 1) */
std::size_t counter = 0;
std::vector<std::size_t> node_counters(num_nodes);
-for(char* c = start; c < end; c += page_size) {
+for(char* c = start; c < end; c += PAGE_SIZE) {
node_counters[argo::get_homenode(c)]++;
counter++;
}
@@ -109,9 +106,9 @@ TEST_F(APITest, GetBlockSize) {
std::size_t api_block_size = argo::get_block_size();
std::size_t size_per_node = argo::backend::global_size()/argo::number_of_nodes();
if(dd::is_cyclic_policy()) {
-ASSERT_EQ(api_block_size, env_block_size*page_size);
+ASSERT_EQ(api_block_size, env_block_size*PAGE_SIZE);
} else if (dd::is_first_touch_policy()) {
-ASSERT_EQ(api_block_size, page_size);
+ASSERT_EQ(api_block_size, PAGE_SIZE);
} else {
ASSERT_EQ(api_block_size, size_per_node);
}
6 changes: 3 additions & 3 deletions tests/backend.cpp
@@ -688,7 +688,7 @@ TEST_F(backendTest, randAccessesPeriodicSelectiveReleaseAcquireArray) {
*/
TEST_F(backendTest, randAccessesBarrierPage) {
// Allocate global page
-constexpr std::size_t array_size = 4096 / sizeof(unsigned char);
+constexpr std::size_t array_size = PAGE_SIZE / sizeof(unsigned char);
unsigned char*const array = argo::conew_array<unsigned char>(array_size);

// Allocate indices array and populate it
@@ -745,7 +745,7 @@ TEST_F(backendTest, randAccessesBarrierPage) {
*/
TEST_F(backendTest, randAccessesBulkySelectiveReleaseAcquirePage) {
// Allocate global page
-constexpr std::size_t array_size = 4096 / sizeof(unsigned char);
+constexpr std::size_t array_size = PAGE_SIZE / sizeof(unsigned char);
unsigned char*const array = argo::conew_array<unsigned char>(array_size);

// Allocate indices array and populate it
@@ -808,7 +808,7 @@ TEST_F(backendTest, randAccessesBulkySelectiveReleaseAcquirePage) {
*/
TEST_F(backendTest, randAccessesPeriodicSelectiveReleaseAcquirePage) {
// Allocate global page
-constexpr std::size_t array_size = 4096 / sizeof(unsigned char);
+constexpr std::size_t array_size = PAGE_SIZE / sizeof(unsigned char);
unsigned char*const array = argo::conew_array<unsigned char>(array_size);

// Allocate indices array and populate it
48 changes: 23 additions & 25 deletions tests/barrier.cpp
@@ -16,8 +16,6 @@
constexpr std::size_t size = 1<<30;
/** @brief ArgoDSM cache size */
constexpr std::size_t cache_size = size/8;
-/** @brief Size of an ArgoDSM page */
-constexpr std::size_t page_size = 4096;

/** @brief Maximum number of threads to run in the stress tests */
constexpr int max_threads = 128;
@@ -52,32 +50,32 @@ TEST_F(barrierTest, simpleBarrier) {
TEST_F(barrierTest, barrierUpgradeWriters) {
std::size_t num_pages = 32;
const char a = 'a';
-char* c_array = argo::conew_array<char>(page_size*num_pages);
+char* c_array = argo::conew_array<char>(PAGE_SIZE*num_pages);

// Write data on node 0
if(argo::node_id() == 0) {
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
c_array[i] = a;
}
}
argo::barrier();

// Read data on all nodes
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], a);
}
// Upgrade all non-private pages to shared
argo::barrier_upgrade_writers();

// Read data on all nodes and self-invalidate through a barrier
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], a);
}
argo::barrier();

// Check that all nodes have all pages cached (or local)
for(std::size_t n = 0; n < num_pages; n++) {
-ASSERT_TRUE(argo::backend::is_cached(&c_array[n*page_size]));
+ASSERT_TRUE(argo::backend::is_cached(&c_array[n*PAGE_SIZE]));
}
}

@@ -89,26 +87,26 @@ TEST_F(barrierTest, barrierUpgradeWriters) {
TEST_F(barrierTest, barrierReadUpgraded) {
std::size_t num_pages = 32;
const char a = 'a';
-char* c_array = argo::conew_array<char>(page_size*num_pages);
+char* c_array = argo::conew_array<char>(PAGE_SIZE*num_pages);

// Write data on node 0
if(argo::node_id() == 0) {
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
c_array[i] = a;
}
}
// Upgrade all non-private pages to shared
argo::barrier_upgrade_writers();

// Read data on all nodes and self-invalidate through a barrier
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], a);
}
argo::barrier();

// Check that all nodes have all pages cached (or local)
for(std::size_t n = 0; n < num_pages; n++) {
-ASSERT_TRUE(argo::backend::is_cached(&c_array[n*page_size]));
+ASSERT_TRUE(argo::backend::is_cached(&c_array[n*PAGE_SIZE]));
}
}

@@ -121,42 +119,42 @@ TEST_F(barrierTest, barrierDowngradeAfterUpgrade) {
const std::size_t num_pages = 32;
const char a = 'a';
const char b = 'b';
-char* c_array = argo::conew_array<char>(page_size*num_pages);
+char* c_array = argo::conew_array<char>(PAGE_SIZE*num_pages);

// Write data on node 0
if(argo::node_id() == 0) {
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
c_array[i] = a;
}
}
argo::barrier();

// Read data on all nodes
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], a);
}
// Upgrade all non-private pages to shared
argo::barrier_upgrade_writers();

// Write to the same pages again
if(argo::node_id() == 0) {
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
c_array[i] = b;
}
}
argo::barrier();

// Read data again all nodes
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], b);
}
argo::barrier();

// Check that no node besides 0 has anything cached
if(argo::node_id() != 0) {
for(std::size_t n = 0; n < num_pages; n++) {
-if(argo::get_homenode(&c_array[n*page_size]) != argo::node_id()) {
-ASSERT_FALSE(argo::backend::is_cached(&c_array[n*page_size]));
+if(argo::get_homenode(&c_array[n*PAGE_SIZE]) != argo::node_id()) {
+ASSERT_FALSE(argo::backend::is_cached(&c_array[n*PAGE_SIZE]));
}
}
}
@@ -170,18 +168,18 @@ TEST_F(barrierTest, barrierUpgradeAll) {
TEST_F(barrierTest, barrierUpgradeAll) {
const std::size_t num_pages = 32;
const char a = 'a';
-char* c_array = argo::conew_array<char>(page_size*num_pages);
+char* c_array = argo::conew_array<char>(PAGE_SIZE*num_pages);

// Write data on node 0
if(argo::node_id() == 0) {
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
c_array[i] = a;
}
}
argo::barrier();

// Read data on all nodes
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], a);
}

@@ -190,20 +188,20 @@

// Check that no node has anything cached
for(std::size_t n = 0; n < num_pages; n++) {
-if(argo::get_homenode(&c_array[n*page_size]) != argo::node_id()) {
-ASSERT_FALSE(argo::backend::is_cached(&c_array[n*page_size]));
+if(argo::get_homenode(&c_array[n*PAGE_SIZE]) != argo::node_id()) {
+ASSERT_FALSE(argo::backend::is_cached(&c_array[n*PAGE_SIZE]));
}
}

// Read data on all nodes
-for(std::size_t i = 0; i < page_size*num_pages; i++) {
+for(std::size_t i = 0; i < PAGE_SIZE*num_pages; i++) {
ASSERT_EQ(c_array[i], a);
}
argo::barrier();

// Check that all nodes have all pages cached (or local)
for(std::size_t n = 0; n < num_pages; n++) {
-ASSERT_TRUE(argo::backend::is_cached(&c_array[n*page_size]));
+ASSERT_TRUE(argo::backend::is_cached(&c_array[n*PAGE_SIZE]));
}
}

39 changes: 18 additions & 21 deletions tests/prefetch.cpp
@@ -30,9 +30,6 @@ namespace dd = argo::data_distribution;
namespace mem = argo::mempools;
extern mem::global_memory_pool<>* default_global_mempool;

-/** @brief ArgoDSM page size */
-const std::size_t page_size = 4096;
-
/** @brief A "random" char constant */
constexpr char c_const = 'a';

@@ -52,7 +49,7 @@ class PrefetchTest : public testing::Test {


/**
-* @brief Unittest that checks that there is no error when accessing
+* @brief Unit test that checks that there is no error when accessing
* the first byte of the allocation.
*/
TEST_F(PrefetchTest, FirstPage) {
Expand All @@ -66,7 +63,7 @@ TEST_F(PrefetchTest, FirstPage) {
}

/**
-* @brief Unittest that checks that there is no error when accessing
+* @brief Unit test that checks that there is no error when accessing
* the last byte of the allocation.
*/
TEST_F(PrefetchTest, OutOfBounds) {
Expand All @@ -80,24 +77,24 @@ TEST_F(PrefetchTest, OutOfBounds) {
}

/**
-* @brief Unittest that checks that there is no error when accessing
+* @brief Unit test that checks that there is no error when accessing
* bytes on either side of a page boundary.
*/
TEST_F(PrefetchTest, PageBoundaries) {
std::size_t alloc_size = default_global_mempool->available();
char *tmp = static_cast<char*>(collective_alloc(alloc_size));
std::size_t load_size = env::load_size();
if(argo::node_id() == 0) {
-tmp[(page_size*load_size)-1] = c_const;
-tmp[page_size*load_size] = c_const;
+tmp[(PAGE_SIZE*load_size)-1] = c_const;
+tmp[PAGE_SIZE*load_size] = c_const;
}
argo::barrier();
-ASSERT_EQ(c_const, tmp[(page_size*load_size)-1]);
-ASSERT_EQ(c_const, tmp[page_size*load_size]);
+ASSERT_EQ(c_const, tmp[(PAGE_SIZE*load_size)-1]);
+ASSERT_EQ(c_const, tmp[PAGE_SIZE*load_size]);
}

/**
-* @brief Unittest that checks that pages are correctly prefetched.
+* @brief Unit test that checks that pages are correctly prefetched.
*/
TEST_F(PrefetchTest, AccessPrefetched) {
std::size_t alloc_size = default_global_mempool->available();
Expand All @@ -111,18 +108,18 @@ TEST_F(PrefetchTest, AccessPrefetched) {
if(dd::is_cyclic_policy()) {
/*
* For blocked policies, at most one block is guaranteed
-* to be contiguious in both global memory and backing store
+* to be contiguous in both global memory and backing store
*/
stride = std::min(block_size, load_size);
start_page = 0;
} else {
/*
* For the first touch policiy, at most
-* (alloc_size/4096)/num_nodes are guaranteed to be contiguious
+* (alloc_size/PAGE_SIZE)/num_nodes are guaranteed to be contiguous
* in both global memory and backing store.
*/
-stride = (load_size < ((alloc_size/page_size)/num_nodes)) ?
-load_size : (alloc_size/page_size)/num_nodes - 1;
+stride = (load_size < ((alloc_size/PAGE_SIZE)/num_nodes)) ?
+load_size : (alloc_size/PAGE_SIZE)/num_nodes - 1;
start_page = 0;
}
std::size_t end_page = start_page+stride;
Expand All @@ -134,8 +131,8 @@ TEST_F(PrefetchTest, AccessPrefetched) {
for(std::size_t page_num = start_page;
page_num < end_page;
page_num++) {
-for(std::size_t i = 0; i < page_size; i++) {
-tmp[page_num*page_size+i] = c_const;
+for(std::size_t i = 0; i < PAGE_SIZE; i++) {
+tmp[page_num*PAGE_SIZE+i] = c_const;
}
/**
* This fence prevents both compiler and CPU reordering
Expand All @@ -152,19 +149,19 @@ TEST_F(PrefetchTest, AccessPrefetched) {

/* On all nodes, check that after accessing the first page,
* all expected pages are either node local or cached. */
-ASSERT_EQ(tmp[start_page*page_size], c_const); // Access first page
+ASSERT_EQ(tmp[start_page*PAGE_SIZE], c_const); // Access first page
for(std::size_t page_num = start_page;
page_num < end_page;
page_num++) {
-ASSERT_TRUE(argo::backend::is_cached(&tmp[page_num*page_size]));
+ASSERT_TRUE(argo::backend::is_cached(&tmp[page_num*PAGE_SIZE]));
}
/* On all nodes, check that after accessing the first page,
* all data is correct. */
for(std::size_t page_num = start_page;
page_num < end_page;
page_num++) {
-for(std::size_t i = 0; i < page_size; i++) {
-ASSERT_EQ(tmp[page_num*page_size+i], c_const);
+for(std::size_t i = 0; i < PAGE_SIZE; i++) {
+ASSERT_EQ(tmp[page_num*PAGE_SIZE+i], c_const);
}
}
}
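
For intuition about the stride computation in AccessPrefetched above, a small standalone sketch with made-up numbers (all values hypothetical; ArgoDSM derives them from the environment at run time):

    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::size_t page_size  = 4096;      // stand-in for PAGE_SIZE
        const std::size_t alloc_size = 1UL << 30; // 1 GiB global allocation
        const std::size_t num_nodes  = 4;
        const std::size_t load_size  = 8;         // pages fetched per remote load

        // First-touch policy: at most (alloc_size/page_size)/num_nodes pages
        // are guaranteed contiguous in both global memory and backing store.
        const std::size_t contiguous = (alloc_size / page_size) / num_nodes;
        const std::size_t stride =
            (load_size < contiguous) ? load_size : contiguous - 1;
        std::printf("contiguous=%zu stride=%zu\n", contiguous, stride); // 65536, 8
        return 0;
    }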
Expand Down
