addVec.cpp
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <iostream>
// Kernel function to be executed on the GPU
__global__ void vector_add(float* A, float* B, float* C, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        C[idx] = A[idx] + B[idx];
    }
}

int main(int argc, char* argv[]) {
    // Initialize MPI
    MPI_Init(&argc, &argv);

    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
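    // Note: every rank uses the default GPU here. On a multi-GPU node each
    // rank would typically select its own device; a minimal sketch:
    //   int device_count = 0;
    //   hipGetDeviceCount(&device_count);
    //   hipSetDevice(world_rank % device_count);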

    // Size of the vectors
    int N = 1024;

    // Allocate host memory
    float* h_A = new float[N];
    float* h_B = new float[N];
    float* h_C = new float[N];

    // Initialize vectors
    for (int i = 0; i < N; ++i) {
        h_A[i] = i;
        h_B[i] = i * 2;
    }

    // Allocate device memory
    float *d_A, *d_B, *d_C;
    hipMalloc(&d_A, N * sizeof(float));
    hipMalloc(&d_B, N * sizeof(float));
    hipMalloc(&d_C, N * sizeof(float));
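    // Error codes returned by the hipMalloc/hipMemcpy/hipFree calls in this
    // example are ignored for brevity; production code would check each
    // hipError_t result.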

    // Copy data from host to device
    hipMemcpy(d_A, h_A, N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, N * sizeof(float), hipMemcpyHostToDevice);

    // Launch kernel
    int threads_per_block = 256;
    int blocks_per_grid = (N + threads_per_block - 1) / threads_per_block;
    hipLaunchKernelGGL(vector_add, dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
                       d_A, d_B, d_C, N);
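    // The blocking hipMemcpy below uses the same (null) stream as the kernel,
    // so it implicitly waits for the kernel to finish. An explicit
    // hipDeviceSynchronize() (plus hipGetLastError()) would only be needed to
    // surface kernel errors earlier or when using separate streams.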

    // Copy result back to host
    hipMemcpy(h_C, d_C, N * sizeof(float), hipMemcpyDeviceToHost);

    // There is no work decomposition: every rank computed the full vector, so
    // each process prints all N results, and output from different ranks may
    // interleave.
    std::cout << "Process " << world_rank << " results: ";
    for (int i = 0; i < N; ++i) {
        std::cout << h_C[i] << " ";
    }
    std::cout << std::endl;
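    // A quick correctness check could assert the expected value i + 2*i = 3*i;
    // a sketch, assuming <cassert> is also included:
    //   for (int i = 0; i < N; ++i) assert(h_C[i] == 3.0f * i);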

    // Free device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    // Free host memory
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;

    // Finalize MPI
    MPI_Finalize();
    return 0;
}
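
A sketch of building and running (assuming ROCm's hipcc and Open MPI's mpicxx
wrapper; the OMPI_CXX compiler override is Open MPI-specific, and exact flags
vary by system):

    OMPI_CXX=hipcc mpicxx addVec.cpp -o addVec
    mpirun -np 2 ./addVec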