Skip to content

Commit

Permalink
created single study prediction from dicom
Browse files Browse the repository at this point in the history
  • Loading branch information
marshuang80 committed Jan 22, 2020
1 parent 929ae8b commit 2af3eee
Show file tree
Hide file tree
Showing 10 changed files with 162 additions and 9 deletions.
2 changes: 0 additions & 2 deletions datasets/base_ct_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,6 @@ def _normalize_raw(self, pixels):


pixels = (pixels - self.pixel_dict['min_val']) / (self.pixel_dict['max_val'] - self.pixel_dict['min_val'])
#print(pixels.min(), pixels.max())


pixels = np.clip(pixels, 0., 1.) - self.pixel_dict['avg_val']

Expand Down
8 changes: 4 additions & 4 deletions datasets/ct_pe_dataset_3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def __init__(self, args, phase, is_training_set=True):
self.resize_shape = args.resize_shape
self.is_test_mode = not args.is_training
self.pe_types = args.pe_types
self.meta_features = args.features.split(",")
#self.meta_features = args.features.split(",")

# Augmentation
self.crop_shape = args.crop_shape
Expand Down Expand Up @@ -160,9 +160,9 @@ def __getitem__(self, idx):

# TODO

meta = self._parse_metadata(ctpe)
#meta = self._parse_metadata(ctpe)

meta = torch.from_numpy(meta).type(torch.float)
#meta = torch.from_numpy(meta).type(torch.float)

# metadata dictionary
#meta_dict = {"age": age,
Expand All @@ -178,7 +178,7 @@ def __getitem__(self, idx):
'slice_idx': start_idx,
'series_idx': ctpe_idx}

return volume, target, meta
return volume, target#, meta
#return volume, target

def get_series_label(self, series_idx):
Expand Down
1 change: 1 addition & 0 deletions intermountain/parse_raw_dicom.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ def main(args):

# create np array of slices and save to output_dir/images directory
npy_volume = np.array([dcm.pixel_array for dcm in dcm_slices_sorted])
print(npy_volume.shape)

# TODO normalize data to range -3024, 3071
#npy_volume = np.interp(npy_volume, (npy_volume.min(), npy_volume.max()), (-3024, 3071))
Expand Down
2 changes: 1 addition & 1 deletion intermountain/parse_raw_dicom.sh
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
#sudo python parse_raw_dicom.py --input_dir /mnt/bbq/intermountain/CTPA_RANDOM_DICOM/ --output_dir /mnt/bbq/intermountain/TEST1/ --csv_path /mnt/bbq/intermountain/intermountain_labeled.csv
#sudo python parse_raw_dicom.py --input_dir /mnt/bbq/intermountain2/CTPA_Images/ --output_dir /mnt/bbq/intermountain2/CTPA_SUBSEG/ --csv_path /mnt/bbq/intermountain2/CTPA_intermountain_2.csv > ./parse_log.txt
sudo python parse_raw_dicom.py --input_dir /mnt/bbq/intermountain2/CTPA_Images/ --output_dir /mnt/bbq/intermountain2/CTPA_COMBINED_SUBSEG/ --csv_path /mnt/bbq/intermountain2/intermountain_combined.csv
python3 parse_raw_dicom.py --input_dir /data4/intermountain/CTPA/test --output_dir ~/ --csv_path /data4/intermountain2/intermountain_combined.csv
2 changes: 1 addition & 1 deletion models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@
from .xnet import XNet
from .xnet_classifier import XNetClassifier
from .resnet import ResNet
from .fusionnet import FusionNet
#from .fusionnet import FusionNet
2 changes: 1 addition & 1 deletion run_test_intermountain.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ python ./test_intermountain.py --phase test \
--ckpt_path /data4/PE_stanford/ckpts/best.pth.tar \
--name stanford \
--dataset pe \
--data_dir /data4/PE_stanford \
--data_dir /data4/intermountain2/CTPA_FINAL \
--gpu_ids 0

#python ./count.py --phase test --results_dir ../results --ckpt_path /mnt/bbq/PE_stanford/ckpts/best.pth.tar --name stanford --dataset pe --data_dir /mnt/bbq/PE_stanford --gpu_ids 0
Expand Down
54 changes: 54 additions & 0 deletions test_from_dicom.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
"""Take in DICOM file and output CTPA to into Model
"""

import argparse
import util
import torch
from saver import ModelSaver


def main(args):
    """Run the saved PE classifier on a single DICOM study and print the
    study-level probability of pulmonary embolism.

    Args:
        args: parsed CLI namespace with ``input_study`` (directory of .dcm
            files), ``series_description``, ``ckpt_path``, ``device`` and
            ``gpu_ids`` attributes.

    Raises:
        ValueError: if the study yields no sliding windows to evaluate.
    """
    # create npy volume from the raw DICOM series
    print("Reading input dicom...")
    study = util.dicom_2_npy(args.input_study, args.series_description)

    # resize, crop, normalize and split into sliding windows of slices
    print("Formatting input for model...")
    study_windows = util.format_img(study)

    print("Loading saved model...")
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)

    print("Sending model to GPU device...")
    model = model.to(args.device)

    print("Evaluating study...")
    model.eval()
    predicted_probabilities = []  # one probability per sliding window
    with torch.no_grad():
        for window in study_windows:
            cls_logits = model.forward(window.to(args.device, dtype=torch.float))
            cls_probs = torch.sigmoid(cls_logits).to('cpu').numpy()
            predicted_probabilities.append(cls_probs[0][0])

    # guard against an empty window list so max() fails with a clear message
    if not predicted_probabilities:
        raise ValueError("Study produced no windows to evaluate")

    # study-level score is the max over all window probabilities
    print(f"Probability of having Pulmonary Embolism: {max(predicted_probabilities)}")

if __name__ == "__main__":

    # parse in arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_study", type=str, default="/data4/intermountain/CTPA/CTPA_RANDOM_DICOM/1770659")
    parser.add_argument("--series_description", type=str, default="CTA 2.0 CTA/PULM CE")
    parser.add_argument("--ckpt_path", type=str, default="/data4/PE_stanford/ckpts/best.pth.tar")
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--gpu_ids", type=str, default="0")
    args = parser.parse_args()

    # Normalize gpu_ids to a list of ints whether one or many ids are given.
    # (The previous code left comma-separated ids as strings while converting
    # a single id to int, so downstream code saw inconsistent types.)
    args.gpu_ids = [int(gpu_id) for gpu_id in args.gpu_ids.split(",")]

    main(args)
3 changes: 3 additions & 0 deletions test_intermountain.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,9 @@ def test(args):
print("Saving predictions to pickle files")
with open('{}/preds.pickle'.format(args.results_dir),"wb") as fp:
pickle.dump(predictions,fp)

print(predictions.keys())
print(predictions[1770659])

# Write features for XGBoost
#save_for_xgb(args.results_dir, study2probs, study2labels)
Expand Down
1 change: 1 addition & 0 deletions util/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,4 @@
from .image_util import *
#from .optim_util import *
from .io_util import *
from .single_study_util import *
96 changes: 96 additions & 0 deletions util/single_study_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import numpy as np
import pydicom
import cv2
import torch
import util
import os
import glob
from ct.ct_pe_constants import W_CENTER_DEFAULT, W_WIDTH_DEFAULT, CONTRAST_HU_MEAN, \
CONTRAST_HU_MIN, CONTRAST_HU_MAX



def dicom_2_npy(input_study, series_description):
    """Read the DICOM slices of one study and stack them into a numpy volume.

    Args:
        input_study: directory containing the study's ``*dcm`` files.
        series_description: only slices whose ``SeriesDescription`` equals
            this string are kept (filters out scout/localizer series).

    Returns:
        np.ndarray of shape (num_slices, H, W), slices ordered by
        ``InstanceNumber`` and reversed for feet-first ("FFS") studies so
        orientation is consistent across studies.

    Raises:
        Exception: if the directory has no DICOM files, or none match
            ``series_description``.
    """
    dcm_slices = []
    # TODO:
    # study_name =
    # subsegmental =
    # start =
    # end =

    # get all dicom files in path
    files = glob.glob(os.path.join(input_study, "*dcm"))
    if len(files) == 0:
        # NOTE: the unreachable `return` that followed this raise was removed
        raise Exception("No dicom files in directory")

    # read in all dcm slices, skipping unreadable files and other series
    for dicom_path in files:
        try:
            dcm = pydicom.dcmread(dicom_path)
            # touch pixel data up-front so corrupt pixel buffers are caught here
            dcm.pixel_array
        except Exception as e:
            # narrow from a bare except: only reading errors are best-effort
            print(f"error reading dicom: {e}")
            continue
        # skip dicom types that we don't want
        if dcm.SeriesDescription != series_description:
            continue
        dcm_slices.append(dcm)

    # check if dicoms were successfully retrieved
    if len(dcm_slices) == 0:
        raise Exception("no dicom files retrived")

    # sort slices by the scanner-assigned slice index
    dcm_slices_sorted = sorted(dcm_slices, key=lambda d: int(d.InstanceNumber))

    # save as npy_volume
    npy_volume = np.array([d.pixel_array for d in dcm_slices_sorted])

    # Reverse volume if patient position differs from standard.
    # BUGFIX: previously this read `dcm`, the *last file parsed* in the loop
    # above, which may belong to a filtered-out series; use a kept slice.
    if dcm_slices_sorted[0].PatientPosition == "FFS":
        npy_volume = npy_volume[::-1]

    # TODO: normalize numpy?
    #npy_volume = np.interp(npy_volume, (npy_volume.min(), npy_volume.max()), (-3024, 3071))
    #npy_volume[npy_volume > 3071] = 3071
    #npy_volume[npy_volume < -3024] = -3024

    return npy_volume


def normalize(img):
    """Map raw Hounsfield units into the model's expected input range.

    Linearly rescales so the contrast window [CONTRAST_HU_MIN,
    CONTRAST_HU_MAX] spans [0, 1], clips values outside the window,
    then centers the result by subtracting CONTRAST_HU_MEAN.
    """
    hu_window = CONTRAST_HU_MAX - CONTRAST_HU_MIN
    scaled = (img.astype(np.float32) - CONTRAST_HU_MIN) / hu_window
    return np.clip(scaled, 0., 1.) - CONTRAST_HU_MEAN


def format_img(img, window_size=24, resize_shape=(208, 208), crop_size=192):
    """Reshape, normalize an image volume and convert it to model-ready tensors.

    Generalized: the previously hard-coded window length (24), resize shape
    (208x208) and crop size (192) are now keyword parameters with the same
    defaults, so existing callers are unaffected.

    Args:
        img: np.ndarray volume of shape (num_slices, H, W) in Hounsfield units.
        window_size: number of consecutive slices per model input window.
        resize_shape: (H, W) each slice is resized to before cropping.
        crop_size: side length of the center crop taken after resizing.

    Returns:
        List of torch tensors, one per sliding window of slices, each of
        shape (1, 1, window_size, crop_size, crop_size).

    Raises:
        ValueError: if the volume has fewer than ``window_size`` slices
            (previously this silently returned an empty list, which made
            downstream ``max()`` calls fail with an opaque error).
    """
    num_slices = img.shape[0]
    num_windows = num_slices - window_size + 1
    if num_windows < 1:
        raise ValueError(
            f"Need at least {window_size} slices, got {num_slices}")

    # rescale each slice
    img = util.resize_slice_wise(img, resize_shape, cv2.INTER_AREA)

    # center crop
    row = (img.shape[-2] - crop_size) // 2
    col = (img.shape[-1] - crop_size) // 2
    img = img[:, row:row + crop_size, col:col + crop_size]

    # normalize Hounsfield units
    img_normalized = normalize(img)

    # split into overlapping slice windows, then add batch + channel dims
    img_split = np.array([img_normalized[i:i + window_size] for i in range(num_windows)])
    img_expand = [np.expand_dims(np.expand_dims(split, axis=0), axis=0) for split in img_split]

    # create torch tensors
    study_windows = [torch.from_numpy(np.array(window)) for window in img_expand]

    return study_windows

0 comments on commit 2af3eee

Please sign in to comment.