forked from NOAA-OWP/DMOD
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathexample.env
348 lines (297 loc) · 18 KB
/
example.env
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
################################################################################
################################################################################
## About: ##
## ##
## This is a default example of an environment-specific .env file used in ##
## various parts of this project. This file serves as a good starting point ##
## for crafting your own .env, and is (at the time of this writing) used by ##
## the stack control script to automatically create a starting .env if one ##
## does not already exist (see the generate_default_env_file function). ##
################################################################################
################################################################################
########
# Set this variable to add a unique prefix to the stack name, and make sure that
# the DOCKER_GUI_WEB_SERVER_HOST_PORT is set to a port value not used by an
# existing stack. You will get an error deploying if there is a port conflict
########
#STACK_PREFIX=
################################################################################
################################################################################
## Model Settings ##
################################################################################
################################################################################
# Note that many of these are used in some way by Docker, but are more intrinsic to
# the model being built/run, and as such are separated from the large Docker-specific
# settings section below.
## The particular version of the NetCDF dependency to build and use within the
## 'nwm-deps' image, currently the basis for the main 'nwm' image.
##
## Note there is a problematic bug that emerges if using versions of NetCDF after
## 4.6 documented at https://github.com/NCAR/wrf_hydro_nwm_public/issues/382. For
## now at least, a fix is not ported to older NWM versions, so the older NetCDF
## must be used.
NETCDF_C_VERSION=v4.6.0
## Additionally the NetCDF-Fortran version probably also needs to be set to remain
## compatible with the main NetCDF (i.e., C language) version
NETCDF_FORTRAN_VERSION=v4.5.2
## The descriptive name of the version of the model being used, which will default
## to master if unspecified
#NWM_NAME=
## The URL and particular branch for the Git repo from which source for the NWM can
## be retrieved to be built
NWM_REPO_URL=https://github.com/NCAR/wrf_hydro_nwm_public.git
NWM_BRANCH=master
## Optionally, a particular commit can be specified
#NWM_COMMIT=
## The URL and particular branch for the Git repo from which source for the NextGen
## framework can be retrieved to be built
NGEN_REPO_URL=https://github.com/NOAA-OWP/ngen
NGEN_BRANCH=master
## Optionally, a particular commit can be specified
#NGEN_COMMIT=
## The number of parallel jobs to allow when compiling NGen as part of the ngen
## container image build. A default is set inside the Compose build file
## (currently 2) even if this variable is not.
##
## Increasing jobs will speed the build, though compiling errors may occur if
## set too high.
##
## In particular, some Docker installations have produced compiling errors with
## values N < num_cpus_available_to_build_container <= num_host_cpus.
#NGEN_BUILD_PARALLEL_JOBS=
## The path to the directory containing required hydrofabric data files.
#HYDROFABRIC_DATA_DIR=
################################################################################
################################################################################
## Python Packages Settings ##
################################################################################
################################################################################
## The "name" of the built communication Python distribution package, for purposes of installing (e.g., via pip)
PYTHON_PACKAGE_DIST_NAME_COMMS=dmod-communication
## The name of the actual Python communication package (i.e., for importing or specifying as a module on the command line)
PYTHON_PACKAGE_NAME_COMMS=dmod.communication
## The "name" of the built request service Python distribution package, for purposes of installing (e.g., via pip)
PYTHON_PACKAGE_DIST_NAME_REQUEST_SERVICE=dmod-requestservice
## Optional version constraint for the request SERVICE Python distribution package (pip format)
#PYTHON_PACKAGE_DIST_VERSION_CONSTRAINT_REQUEST_SERVICE=>=0.3.0
## The name of the actual Python request service package (i.e., for importing or specifying as a module on the command line)
PYTHON_PACKAGE_NAME_REQUEST_SERVICE=dmod.requestservice
## The "name" of the built scheduler LIBRARY Python distribution package, for purposes of installing (e.g., via pip)
PYTHON_PACKAGE_DIST_NAME_SCHEDULER=dmod-scheduler
## The name of the actual Python scheduler LIBRARY package (i.e., for importing or specifying as a module on the command line)
PYTHON_PACKAGE_NAME_SCHEDULER=dmod.scheduler
## The "name" of the built scheduler SERVICE Python distribution package, for purposes of installing (e.g., via pip)
PYTHON_PACKAGE_DIST_NAME_SCHEDULER_SERVICE=dmod-schedulerservice
## Optional version constraint for the scheduler SERVICE Python distribution package (pip format)
#PYTHON_PACKAGE_DIST_VERSION_CONSTRAINT_SCHEDULER_SERVICE=>=0.3.0
## The name of the actual Python scheduler SERVICE package (i.e., for importing or specifying as a module on the command line)
PYTHON_PACKAGE_NAME_SCHEDULER_SERVICE=dmod.schedulerservice
PYTHON_PACKAGE_DIST_NAME_ACCESS=dmod-access
PYTHON_PACKAGE_DIST_NAME_EXTERNAL_REQUESTS=dmod-externalrequests
PYTHON_PACKAGE_DIST_NAME_MODELDATA=dmod-modeldata
PYTHON_PACKAGE_NAME_MODELDATA=dmod.modeldata
PYTHON_PACKAGE_DIST_NAME_SUBSET_SERVICE=dmod-subsetservice
PYTHON_PACKAGE_NAME_SUBSET_SERVICE=dmod.subsetservice
################################################################################
################################################################################
## Docker Settings ##
################################################################################
################################################################################
########################################################################
## Docker MPI overlay network settings ##
########################################################################
## Settings for backend "MPI" network used by the DMoD stack for ##
## fast communication between nodes executing the model. In reality, ##
## MPI is not an absolute requirement, though typically it will be ##
## used. ##
## ##
## This network will be created automatically if it does not already ##
## exist by the stack control script. ##
## ##
## It is suggested the network be optimized by using a VXLAN. The ##
## aforementioned automatically-created network will be set up ##
## in this manner. ##
## ##
## See https://tools.ietf.org/html/rfc7348 ##
########################################################################
DOCKER_MPI_NET_NAME=mpi-net
DOCKER_MPI_NET_SUBNET=10.0.0.0/24
DOCKER_MPI_NET_GATEWAY=10.0.0.1
DOCKER_MPI_NET_VXLAN_ID=4097
########################################################################
## Docker "Requests" overlay network settings ##
########################################################################
## Settings for another dedicated network, primarily used as a way ##
## for Docker containers outside the DMoD stack (but perhaps on the ##
## same Docker host) to be able to communicate requests for model ##
## runs. The primary use right now in development is to be able to ##
## start the GUI service in its own separate stack, and have it be ##
## able to connect to the request handler in this stack. ##
## ##
## This network will also be created automatically if it does not ##
## already exist by the stack control script. ##
########################################################################
DOCKER_REQUESTS_NET_NAME=requests-net
DOCKER_REQUESTS_NET_SUBNET=10.0.1.0/27
DOCKER_REQUESTS_NET_GATEWAY=10.0.1.1
########################################################################
## Private Docker Registry ##
########################################################################
## Configuration for private registry to which and from which custom ##
## images for the stack should be pushed and pulled. ##
########################################################################
## Whether it is expected that an internal registry is managed within the
## project.
## This value may be used by various scripts to determine whether to
## automatically start an additional dedicated stack and registry service.
DOCKER_INTERNAL_REGISTRY_IS_MANAGED=true
## What the name of the dedicated stack should be, if managing one
DOCKER_INTERNAL_REGISTRY_STACK_NAME=dev_registry_stack
## The path to the Docker Compose config file for a managed registry
## stack (relative to the project root).
## Because of how the control scripts work, this needs to correspond to
## the value in DOCKER_INTERNAL_REGISTRY_STACK_NAME.
DOCKER_INTERNAL_REGISTRY_STACK_CONFIG=./docker/dev_registry_stack/docker-registry.yml
## This value is handled by default if left out
#DOCKER_INTERNAL_REGISTRY_SERVICE_NAME=
## The internal registry to use.
## By default, this is set to be an internally managed registry accessible
## at 127.0.0.1 on port 5000.
## Note this replaces the use of the combination of the (now deprecated)
## DOCKER_INTERNAL_REGISTRY_HOST, ':', and DOCKER_INTERNAL_REGISTRY_PORT
DOCKER_INTERNAL_REGISTRY=127.0.0.1:5000
## The host-side port for the registry service to expose, when managing
## a registry stack.
## Note this should correspond to what is in DOCKER_INTERNAL_REGISTRY.
DOCKER_INTERNAL_REGISTRY_PORT=5000
########################################################################
## Docker Volume and Bind Mounts ##
########################################################################
## The bind-mounted host directory for images when using the above discussed
## containerized development registry.
DOCKER_HOST_IMAGE_STORE=./docker_host_volumes/images
## The bind-mounted host directory for domains data used during model execution
DOCKER_VOL_DOMAINS=./docker_host_volumes/domains
## Container and Host paths for the directory for a virtual environment used by the GUI
## application container. Note this functionality is not currently in use (i.e. commented
## out within the GUI Compose config) though it may be used in the future. The intent is
## to allow a GUI service container to have a Python environment that survives the container
## itself.
##
## Note this first env val will also be passed as an env variable inside the container
#DOCKER_GUI_CONTAINER_VENV_DIR=/usr/maas_portal_venv
## If wanting to use as-is, this should be modified to use an absolute path, due to the location
## of the GUI docker-compose.yml file disrupting expectations for relative paths
#DOCKER_GUI_HOST_VENV_DIR=./docker_host_volumes/virtual_envs/gui_venv
########################################################################
## Docker Stack Service Deployment Constraints ##
########################################################################
## Configuration settings to control deployment placement of various ##
## services when starting the DMoD stack, as configured within the ##
## docker-deploy.yml file. ##
## ##
## The docker-deploy.yml file is configured to accept two constraints ##
## for the services where these env values are applied. The first ##
## will default to something generally safe for use. The second will ##
## default to a syntactically valid tautology, which as such will not ##
## actually have any impact on deployment. ##
## ##
## As such, both of these can safely remain commented out. ##
########################################################################
## Constraints for the internal containerized registry service, if used.
DOCKER_REGISTRY_DEPLOY_CONSTRAINT_1=node.role==manager
#DOCKER_REGISTRY_DEPLOY_CONSTRAINT_2=
## Constraints for the scheduler service
DOCKER_SCHEDULER_DEPLOY_CONSTRAINT_1=node.role==manager
#DOCKER_SCHEDULER_DEPLOY_CONSTRAINT_2=
## Constraints for the MPI master service (may be deprecated)
DOCKER_MPIMASTER_DEPLOY_CONSTRAINT_1=node.role==manager
#DOCKER_MPIMASTER_DEPLOY_CONSTRAINT_2=
## Constraints for the MPI work services (may be deprecated)
DOCKER_MPIWORKER_DEPLOY_CONSTRAINT_1=node.role==manager
#DOCKER_MPIWORKER_DEPLOY_CONSTRAINT_2=
## Constraints for the requests handler service
DOCKER_REQUESTS_DEPLOY_CONSTRAINT_1=node.role==manager
#DOCKER_REQUESTS_DEPLOY_CONSTRAINT_2=
########################################################################
## Docker Service-Specific Settings ##
########################################################################
## The listening port for websocket communication for the request handler service
DOCKER_REQUESTS_CONTAINER_PORT=3012
## A container directory in the request handler service where SSL certs are found for listening connections
#DOCKER_REQUESTS_CONTAINER_SERVICE_SSL_DIR=
## A container directory in the request handler service where SSL certs are found for client connections to the scheduler
#DOCKER_REQUESTS_CONTAINER_CLIENT_SSL_DIR=
## The endpoint for the request handler to use when trying to reach the scheduler service
#DOCKER_REQUESTS_SCHEDULER_ENDPOINT_HOST=
## The hostname for the request websocket endpoint used by the GUI for request communication
# TODO: this needs to be adjusted to properly account for the stack name
DOCKER_GUI_MAAS_ENDPOINT_HOST=nwm-master_request-service
## The bound port on the host for the GUI stack web server service container.
## Essentially, this is the port where the application can be accessed.
## This is handled by default in the compose file if not set.
#DOCKER_GUI_WEB_SERVER_HOST_PORT=8081
## The config file for the web server application in analogous service.
## Note that this needs to be the file's relative path from the project root.
## This property is mainly provided for use during development and debugging, and
## thus is handled by default in the compose file if not set.
#DOCKER_GUI_WEB_SERVER_CONFIG_FILE=
## The stack-internal hostname alias for the Redis service
DOCKER_REDIS_SERVICE_ALIAS=redis
## Note that the password value for the Redis service is now managed using a Docker Secret.
## Directory containing redis deployment configuration file called redis.conf
#REDIS_CONF_DIR=
## Directory containing ssl certs for various services:
## should contain individual subdirectories (each with the respective cert) requests requestsservice scheduler
## TODO default to loading system certs
#DMOD_SSL_DIR=
## Directory containing scheduler resource and configuration information, namely
## image_and_domain.yaml and resources.yaml
#SCHEDULER_RESOURCE_DIR=
## Directory containing static DMOD data that the GUI needs to be able to access,
## for example, ngen hydrofabrics
## This volume will also be used for the django app and web server to store all static assets to serve
#DMOD_APP_STATIC=
## DMOD GUI DataBase Settings
#Required variables
#local mount point for persisted database
#DMOD_GUI_POSTGRES_DATA=
#Database access variables
#DMOD_GUI_POSTGRES_USER=
#DMOD_GUI_POSTGRES_PASSWORD=
#Optional Variables
#Defaults to dmod-dev if not set
#DMOD_GUI_POSTGRES_DB=
## DMOD admin interface default user
#by default, the dmod admin user will be dmod_admin
#DMOD_SU_NAME=
#required
#DMOD_SU_EMAIL=
#DMOD_SU_PASSWORD=
#######################################
## DMOD Object Storage Configuration
## Note when using internal object storage, you will need to label your swarm nodes
## correctly so that the storage services launch on the correct node
## From a swarm manager node
## docker node update --label-add minio1=true node-1
## docker node update --label-add minio2=true node-2
## repeat for each minio service defined in dmod/docker/object_storage/docker-compose.yml
## label the proxy node where the DMOD_OBJECT_STORE_PROXY_CONFIG is located
## docker node update --label-add object_store_proxy=true
#######################################
## The nginx proxy config for the object storage, defaults to dmod/docker/object_store/nginx.conf
#DMOD_OBJECT_STORE_PROXY_CONFIG=
#######################################
## The host directories to mount into Object Store service containers, where object store data is stored.
## These are expected as host bind mounts, simply because they may be large and need to not be on the host's
## root partition (i.e., it may not be safe to use internal Docker Volumes).
#DMOD_OBJECT_STORE_HOST_DIR_1=
#DMOD_OBJECT_STORE_HOST_DIR_2=
## Port on Docker host, to which the web console port of 1st stack service is forwarded
DMOD_OBJECT_STORE_1_CONSOLE_HOST_PORT=9001
## Port on Docker host, to which the web console port of 2nd stack service is forwarded
DMOD_OBJECT_STORE_2_CONSOLE_HOST_PORT=9003
## Port on Docker host, to which the object store listening port (probably 9000) of 1st stack service is forwarded
DMOD_OBJECT_STORE_1_HOST_PORT=9002
## Port on Docker host, to which the object store listening port (probably 9000) of 2nd stack service is forwarded
DMOD_OBJECT_STORE_2_HOST_PORT=9004