Brainy_Train_Unet.py
import argparse

import nobrainer
import tensorflow as tf
import yaml
def main(config):
    # Load the sample data: a CSV of (features, labels) filepath pairs.
    csv_of_filepaths = nobrainer.utils.get_data()
    filepaths = nobrainer.io.read_csv(csv_of_filepaths)
    train_paths = filepaths[:9]
    evaluate_paths = filepaths[9:]
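    # The downloaded CSV lists a small demo set of feature/label pairs; the
    # slice above reserves the last pair(s) for evaluation.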
    # Verify that every features/labels pair is valid, then convert the
    # medical images to TFRecords.
    invalid = nobrainer.io.verify_features_labels(train_paths, num_parallel_calls=2)
    assert not invalid
    invalid = nobrainer.io.verify_features_labels(evaluate_paths)
    assert not invalid
    nobrainer.tfrecord.write(
        features_labels=train_paths,
        filename_template='data/data-train_shard-{shard:03d}.tfrec',
        examples_per_shard=3)
    nobrainer.tfrecord.write(
        features_labels=evaluate_paths,
        filename_template='data/data-evaluate_shard-{shard:03d}.tfrec',
        examples_per_shard=1)
    # Set parameters. Full volumes are too large to train on directly, so
    # each (256, 256, 256) volume is split into (128, 128, 128) blocks.
    n_classes = 1
    batch_size = 2
    volume_shape = (256, 256, 256)
    block_shape = (128, 128, 128)
    n_epochs = None  # repeat the training data indefinitely; model.fit sets epochs
    augment = False
    shuffle_buffer_size = 10
    num_parallel_calls = 2
    # Create the datasets for training and validation.
    dataset_train = nobrainer.dataset.get_dataset(
        file_pattern="data/data-train_shard-*.tfrec",
        n_classes=n_classes,
        batch_size=batch_size,
        volume_shape=volume_shape,
        block_shape=block_shape,
        n_epochs=n_epochs,
        augment=augment,
        shuffle_buffer_size=shuffle_buffer_size,
        num_parallel_calls=num_parallel_calls,
    )
    dataset_evaluate = nobrainer.dataset.get_dataset(
        file_pattern="data/data-evaluate_shard-*.tfrec",
        n_classes=n_classes,
        batch_size=batch_size,
        volume_shape=volume_shape,
        block_shape=block_shape,
        n_epochs=1,
        augment=False,
        shuffle_buffer_size=None,
        num_parallel_calls=1,
    )
    # Build and compile the 3D U-Net.
    model = nobrainer.models.unet(
        n_classes=n_classes,
        input_shape=(*block_shape, 1),
        batchnorm=True,
    )
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-04)
    model.compile(
        optimizer=optimizer,
        loss=nobrainer.losses.dice,
        metrics=[nobrainer.metrics.dice, nobrainer.metrics.jaccard],
    )
    # Compute training and validation steps from the full volume shape, not
    # the block shape, since each volume yields multiple blocks per epoch.
    steps_per_epoch = nobrainer.dataset.get_steps_per_epoch(
        n_volumes=len(train_paths),
        volume_shape=volume_shape,
        block_shape=block_shape,
        batch_size=batch_size)
    validation_steps = nobrainer.dataset.get_steps_per_epoch(
        n_volumes=len(evaluate_paths),
        volume_shape=volume_shape,
        block_shape=block_shape,
        batch_size=batch_size)
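    # Worked example of the step arithmetic under the shapes above: each
    # (256, 256, 256) volume splits into (256/128)**3 = 8 blocks, so the 9
    # training volumes yield 72 blocks, i.e. 72 / batch_size = 36 steps.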
    # Train the model and save the learned weights.
    model.fit(
        dataset_train,
        epochs=20,
        steps_per_epoch=steps_per_epoch,
        validation_data=dataset_evaluate,
        validation_steps=validation_steps)
    model.save_weights('weights_brainy_unet.hdf5')
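
# Sketch (illustrative only, not executed by this script): to reuse the saved
# weights for inference, rebuild the identical architecture and load them with
# the standard Keras weight loader.
#     model = nobrainer.models.unet(
#         n_classes=1, input_shape=(128, 128, 128, 1), batchnorm=True)
#     model.load_weights('weights_brainy_unet.hdf5')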
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-config', type=str, help='Path to config YAML file.')
    args = parser.parse_args()
    # Note: main() does not consume any keys from the config yet; the
    # hyperparameters above are hardcoded.
    with open(args.config, 'r') as stream:
        config = yaml.safe_load(stream)
    main(config)
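
# Example invocation (a placeholder YAML file is sufficient, since no config
# keys are read yet):
#     python Brainy_Train_Unet.py -config config.yaml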