-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathheat.cpp
120 lines (77 loc) · 2.49 KB
/
heat.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
#include "grid.h"
#include "mpi_interface.h"
#include "nc_data.h"
#include <iostream>
#include <math.h>
#include <fstream>
#include <mpi.h>
using namespace std;
using namespace netCDF;
using namespace netCDF::exceptions;
int main(int argc, char** argv){
    // Solve the 2D heat equation with MPI domain decomposition.
    // Flow: set up the global grid, carve it into a Cartesian process
    // topology, time-step with implicit Euler (conjugate gradient solve),
    // and periodically gather + write the full field to a netCDF file.
    MPI_Init(&argc, &argv);

    // Global (whole-domain) grid: sizes, spacing, coordinates, and the
    // schedule of output times.
    Grid GlobalGrid;
    GlobalGrid.InitGridValues();
    GlobalGrid.ReadInput();        // override defaults from input, if present
    GlobalGrid.InitCoordinates();
    GlobalGrid.InitFrequencyVector();

    // Per-process subgrid: build the MPI Cartesian topology and record
    // where each rank's patch lives inside the global array.
    // NOTE(review): CreateTopology takes (Nx, Ny, hy, hx, Ly, Lx) — the
    // mixed x/y ordering matches the declaration elsewhere; confirm.
    GridTopology SubGrid(1);
    SubGrid.CreateTopology(*GlobalGrid.Nx, *GlobalGrid.Ny, GlobalGrid.hy, GlobalGrid.hx,
                           *GlobalGrid.Ly, *GlobalGrid.Lx);
    SubGrid.GetDisplacements(*GlobalGrid.Ny, *GlobalGrid.Nx);

    // Local solution u and forcing f on this rank's patch.
    matrix u;
    matrix f;
    u.allocate(SubGrid.y_dim, SubGrid.x_dim);
    f.allocate(SubGrid.y_dim, SubGrid.x_dim);
    u = 0.0;
    InitialConditions(u, SubGrid.y_dim, SubGrid.x_dim, *GlobalGrid.Lx, SubGrid.hx_vec_local, SubGrid.hy_vec_local);
    forcing(f, SubGrid.y_dim, SubGrid.x_dim, *GlobalGrid.Lx, SubGrid.hx_vec_local, SubGrid.hy_vec_local);

    // Output file is defined and written only on the root rank.
    string file_name = "heat.nc";
    NcDataFile data;
    if(SubGrid.myid == 0){
        data.Init(file_name);
        data.InitNcData(*GlobalGrid.Ny, *GlobalGrid.Nx);
        // BUG FIX: the original passed hy_vec for BOTH coordinate arguments,
        // so the x-coordinates were never written. Pass (y, x) to match the
        // (Ny, Nx) ordering used by InitNcData/GetDisplacements above.
        data.WriteCoordData(GlobalGrid.hy_vec, GlobalGrid.hx_vec);
    }

    // Root-only buffer that receives the gathered global field for output.
    matrix full_array;
    if(SubGrid.myid == 0){
        full_array.allocate(*GlobalGrid.Ny, *GlobalGrid.Nx);
    }

    int k = 0;                     // index into the write-time schedule / record dim
    double time = 0.0;
    double write_tol = 0.00001;    // tolerance for matching a scheduled write time

    // Main time-stepper: implicit Euler, u^{n+1} solved by conjugate gradient.
    // NOTE(review): the loop bound compares time against *Nt — presumably Nt
    // holds the final time, not a step count; confirm against Grid.
    while(time < *GlobalGrid.Nt){
        matrix rhs = u + f * (*GlobalGrid.dt);
        ConjGrad(u, rhs, SubGrid.y_dim, SubGrid.x_dim, GlobalGrid.hx, GlobalGrid.hy, *GlobalGrid.dt, SubGrid);

        // Write when the current time matches the next scheduled output time
        // (within write_tol, to absorb floating-point drift in `time`).
        // NOTE(review): k is never bounds-checked against the schedule length;
        // if the run outlasts the schedule, write_freq_vec(k) may read past
        // the end — confirm InitFrequencyVector covers [0, Nt].
        if((time < GlobalGrid.write_freq_vec(k) + write_tol)
           && (time > GlobalGrid.write_freq_vec(k) - write_tol)){
            // Gather every rank's patch into full_array on root, then write.
            SubGrid.GetFullArray(full_array, u, *GlobalGrid.Ny, *GlobalGrid.Nx);
            if(SubGrid.myid == 0){
                data.WriteNcData(full_array, k);
            }
            k += 1;
        }
        time += *GlobalGrid.dt;
    }

    MPI_Finalize();
    return 0;
}