Skip to content

Commit

Permalink
checkpoints updated, scripts updated, dataset links updated
Browse files Browse the repository at this point in the history
  • Loading branch information
abzargar committed Apr 20, 2023
1 parent 264b718 commit d02dc2d
Show file tree
Hide file tree
Showing 14 changed files with 191 additions and 2,056 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,9 @@ To download our datasets go to https://deepseas.org/datasets/ or:

* Link to [Original annotated dataset](https://drive.google.com/drive/folders/13RhhBAetSWkjySyhJcDqj_FaO09hxkhO?usp=sharing)

* Link to [dataset example for cell segmentation](https://drive.google.com/drive/folders/1gJIkwUQEtut4JCCoUXUcKUWp2gVYxQ9P?usp=sharing)
* Link to [dataset example for cell segmentation](https://drive.google.com/drive/folders/18odgkzafW8stHkzME_s7Es-ue7odVAc5?usp=sharing)

* Link to [dataset example for cell tracking](https://drive.google.com/drive/folders/17n0Ex8NQS-REB5ZAMlntVnYBnSmZJtLR?usp=sharing)
* Link to [dataset example for cell tracking](https://drive.google.com/drive/folders/10LWey85fgHgFj_myIr1CYSOviD4SleE4?usp=sharing)

### Pre-trained models
They are saved in the folder "trained_models".
Expand Down
20 changes: 10 additions & 10 deletions deepsea/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def __init__(self, images_dir: str, masks_dir: str,unetwmaps_dir: str, transform
if if_train_aug:
self.ids = [os.path.splitext(file)[0] for file in sorted(os.listdir(images_dir)) if not file.startswith('.')]
tmp=[]
for i in range(train_aug_iter):
for i in range(train_aug_iter+1):
tmp+=self.ids
self.ids=tmp
else:
Expand All @@ -42,14 +42,17 @@ def __getitem__(self, idx):
assert len(mask_file) == 1, f'Either no mask or multiple masks found for the ID {name}: {mask_file}'
assert len(img_file) == 1, f'Either no image or multiple images found for the ID {name}: {img_file}'

mask = cv2.imread(mask_file[0].as_posix())[:, :, 0] > 0
wmap=cv2.imread(unetwmap_file[0].as_posix())[:, :, 0]
mask = cv2.imread(mask_file[0].as_posix(),0) > 0

mask=mask & (wmap<255)
mask = Image.fromarray(np.uint8(mask.astype('float32') * 255))
wmap = Image.fromarray(np.uint8(wmap))
wmap=cv2.imread(unetwmap_file[0].as_posix(),0)>0


mask=mask & (wmap<1)
wmap=wmap.astype('float32')
mask = mask.astype('float32')
img=cv2.imread(img_file[0].as_posix(),0).astype('float32')
img=(255 * ((img - img.min()) / (img.ptp()+1e-6))).astype(np.uint8)

img = Image.open(img_file[0])


assert img.size == mask.size, \
Expand Down Expand Up @@ -169,9 +172,6 @@ def preprocess(cls, pil_img_prev, pil_curr,pil_mask,transforms):

def __getitem__(self, idx):
img_prev,img_curr,mask = self.ids[idx]
img_prev=Image.fromarray(img_prev)
img_curr = Image.fromarray(img_curr)
mask = Image.fromarray(mask)
tensor_img_prev,tensor_img_curr,tensor_mask = self.preprocess(img_prev,img_curr,mask,self.transforms)

return {
Expand Down
82 changes: 9 additions & 73 deletions deepsea/evaluate_test_set_segmentation.py
Original file line number Diff line number Diff line change
@@ -1,66 +1,3 @@
<<<<<<< HEAD:evaluate_test_set_segmentation.py
import torch.utils.data as data
import segmentation_transforms as transforms
import numpy as np
import argparse
import os
import random
from model import DeepSeaSegmentation
from data import BasicSegmentationDataset
import torch
from evaluate import evaluate_segmentation
from utils import get_n_params

SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True


def test(args,image_size = [383,512],image_means = [0.5],image_stds= [0.5],batch_size=1):
    """Evaluate a trained DeepSea segmentation model on a held-out test set.

    Args:
        args: parsed CLI namespace providing test_set_dir (dataset root with
            'images'/'masks'/'wmaps' subfolders), ckpt_dir (checkpoint path)
            and output_dir (where evaluation artifacts are written).
        image_size: (H, W) every input image is resized to before inference.
        image_means: per-channel mean for input normalization.
        image_stds: per-channel std for input normalization.
        batch_size: evaluation batch size (1 keeps per-image metrics simple).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    test_transforms = transforms.Compose([
        # The dataset now loads images with cv2.imread and yields numpy
        # arrays, so convert to PIL first: Resize/Grayscale expect PIL input.
        transforms.ToPILImage(),
        transforms.Resize(image_size),
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        transforms.Normalize(mean=image_means,
                             std=image_stds)
    ])

    test_data = BasicSegmentationDataset(
        os.path.join(args.test_set_dir, 'images'),
        os.path.join(args.test_set_dir, 'masks'),
        os.path.join(args.test_set_dir, 'wmaps'),
        transforms=test_transforms)

    # Deterministic order so outputs can be matched to input files.
    test_iterator = data.DataLoader(test_data, batch_size=batch_size, shuffle=False)

    model = DeepSeaSegmentation(n_channels=1, n_classes=2, bilinear=True)
    print('INFO: Num of model parameters:', get_n_params(model))
    model.load_state_dict(torch.load(args.ckpt_dir))
    model = model.to(device)

    test_score, test_avg_precision, test_easy_avg_precision, test_hard_avg_precision = evaluate_segmentation(
        model, test_iterator, device, len(test_data),
        is_avg_prec=True, prec_thresholds=[0.5, 0.6, 0.7, 0.8, 0.9],
        output_dir=args.output_dir)
    print('INFO: Dice score:', test_score)
    print('INFO: Average precision at ordered thresholds:', test_avg_precision)
    print('INFO: Easy samples average precision at ordered thresholds:', test_easy_avg_precision)
    print('INFO: Hard samples average precision at ordered thresholds:', test_hard_avg_precision)

if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("--test_set_dir",required=True,type=str,help="path for the test dataset")
ap.add_argument("--ckpt_dir",required=True,type=str,help="path for the checkpoint of segmentation model to test")
ap.add_argument("--output_dir", required=True, type=str, help="path for saving the test outputs")

args = ap.parse_args()

assert os.path.isdir(args.test_set_dir), 'No such file or directory: ' + args.test_set_dir
if not os.path.isdir(os.path.join(args.output_dir,'input_segmentation_images')):
os.makedirs(os.path.join(args.output_dir,'input_segmentation_images'))
if not os.path.isdir(os.path.join(args.output_dir,'segmentation_predictions')):
os.makedirs(os.path.join(args.output_dir,'segmentation_predictions'))

=======
import torch.utils.data as data
import segmentation_transforms as transforms
import numpy as np
Expand All @@ -85,15 +22,15 @@ def test(args,image_size = [383,512],image_means = [0.5],image_stds= [0.5],batch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

test_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(image_size),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize(mean = image_means,
std = image_stds)
])


test_data = BasicSegmentationDataset(os.path.join(args.test_dir, 'images'), os.path.join(args.test_dir, 'masks'),os.path.join(args.test_dir, 'wmaps'),transforms=test_transforms)
test_data = BasicSegmentationDataset(os.path.join(args.test_set_dir, 'images'), os.path.join(args.test_set_dir, 'masks'),os.path.join(args.test_set_dir, 'wmaps'),transforms=test_transforms)

test_iterator = data.DataLoader(test_data,batch_size = batch_size,shuffle=False)

Expand All @@ -110,17 +47,16 @@ def test(args,image_size = [383,512],image_means = [0.5],image_stds= [0.5],batch

if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("--test_dir",required=True,type=str,help="path for the test dataset")
ap.add_argument("--test_set_dir",required=True,type=str,help="path for the test dataset")
ap.add_argument("--ckpt_dir",required=True,type=str,help="path for the checkpoint of segmentation model to test")
ap.add_argument("--output_dir", required=True, type=str, help="path for saving the test outputs")

args = ap.parse_args()

assert os.path.isdir(args.test_dir), 'No such file or directory: ' + args.test_dir
if not os.path.isdir(args.output_dir+'/input_segmentation_images'):
os.makedirs(args.output_dir+'/input_segmentation_images')
if not os.path.isdir(args.output_dir+'/segmentation_predictions'):
os.makedirs(args.output_dir+'/segmentation_predictions')
assert os.path.isdir(args.test_set_dir), 'No such file or directory: ' + args.test_set_dir
if not os.path.isdir(os.path.join(args.output_dir,'input_segmentation_images')):
os.makedirs(os.path.join(args.output_dir,'input_segmentation_images'))
if not os.path.isdir(os.path.join(args.output_dir,'segmentation_predictions')):
os.makedirs(os.path.join(args.output_dir,'segmentation_predictions'))

>>>>>>> 688bc95bb88028284a86f389a4e204f93b8d0f83:deepsea/test_segmentation.py
test(args)
test(args)
82 changes: 8 additions & 74 deletions deepsea/evaluate_test_set_tracking.py
Original file line number Diff line number Diff line change
@@ -1,69 +1,3 @@
<<<<<<< HEAD:evaluate_test_set_tracking.py
import os
import torch.utils.data as data
import tracker_transforms as transforms
import numpy as np
import argparse
import random
from model import DeepSeaTracker
from data import BasicTrackerDataset
import torch
from evaluate import evaluate_tracker
from utils import get_n_params

SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True



def test(args,image_size = [128,128],image_means = [0.5],image_stds= [0.5],batch_size=1):
    """Evaluate a trained DeepSea cell-tracking model on a held-out test set.

    Args:
        args: parsed CLI namespace providing test_set_dir (dataset root),
            ckpt_dir (checkpoint path) and output_dir (where evaluation
            artifacts are written).
        image_size: (H, W) every input crop is resized to before inference.
        image_means: per-channel mean for input normalization.
        image_stds: per-channel std for input normalization.
        batch_size: evaluation batch size.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    test_transforms = transforms.Compose([
        # The dataset now yields raw numpy arrays (no Image.fromarray in
        # __getitem__), so convert to PIL first for Resize/Grayscale.
        transforms.ToPILImage(),
        transforms.Resize(image_size),
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        transforms.Normalize(mean=image_means,
                             std=image_stds)
    ])

    test_data = BasicTrackerDataset(os.path.join(args.test_set_dir),
                                    transforms=test_transforms, if_test=True)
    test_iterator = data.DataLoader(test_data, batch_size=batch_size)

    model = DeepSeaTracker(n_channels=1, n_classes=2, bilinear=True)
    print('INFO: Num of model parameters:', get_n_params(model))

    model.load_state_dict(torch.load(args.ckpt_dir))
    model = model.to(device)

    test_score, test_avg_precision, test_single_cell_avg_precision, test_mitosis_avg_precision = evaluate_tracker(
        model, test_iterator, device, len(test_data),
        is_avg_prec=True, prec_thresholds=[0.2, 0.6, 0.7, 0.8, 0.9],
        output_dir=args.output_dir)

    print('INFO: Dice score:', test_score)
    print('INFO: Average precision:', test_avg_precision)
    print('INFO: Single cells average precision:', test_single_cell_avg_precision)
    print('INFO: Mitosis average precision:', test_mitosis_avg_precision)


if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("--test_set_dir",required=True,type=str,help="path for the test dataset")
ap.add_argument("--ckpt_dir",required=True,type=str,help="path for the checkpoint of tracking model to test")
ap.add_argument("--output_dir", required=True, type=str, help="path for saving the test outputs")

args = ap.parse_args()

assert os.path.isdir(args.test_set_dir), 'No such file or directory: ' + args.test_set_dir
if not os.path.isdir(os.path.join(args.output_dir,'input_crops')):
os.makedirs(os.path.join(args.output_dir,'input_crops'))
if not os.path.isdir(os.path.join(args.output_dir,'tracking_predictions')):
os.makedirs(os.path.join(args.output_dir,'tracking_predictions'))

=======
import os
import torch.utils.data as data
import tracker_transforms as transforms
Expand All @@ -89,6 +23,7 @@ def test(args,image_size = [128,128],image_means = [0.5],image_stds= [0.5],batch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

test_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(image_size),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
Expand All @@ -97,7 +32,7 @@ def test(args,image_size = [128,128],image_means = [0.5],image_stds= [0.5],batch
])


test_data = BasicTrackerDataset(os.path.join(args.test_dir), transforms=test_transforms,if_test=True)
test_data = BasicTrackerDataset(os.path.join(args.test_set_dir), transforms=test_transforms,if_test=True)
test_iterator = data.DataLoader(test_data,batch_size = batch_size)

model=DeepSeaTracker(n_channels=1, n_classes=2, bilinear=True)
Expand All @@ -116,17 +51,16 @@ def test(args,image_size = [128,128],image_means = [0.5],image_stds= [0.5],batch

if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("--test_dir",required=True,type=str,help="path for the test dataset")
ap.add_argument("--test_set_dir",required=True,type=str,help="path for the test dataset")
ap.add_argument("--ckpt_dir",required=True,type=str,help="path for the checkpoint of tracking model to test")
ap.add_argument("--output_dir", required=True, type=str, help="path for saving the test outputs")

args = ap.parse_args()

assert os.path.isdir(args.test_dir), 'No such file or directory: ' + args.test_dir
if not os.path.isdir(args.output_dir+'/input_tracking_images'):
os.makedirs(args.output_dir+'/input_tracking_images')
if not os.path.isdir(args.output_dir+'/tracking_predictions'):
os.makedirs(args.output_dir+'/tracking_predictions')
assert os.path.isdir(args.test_set_dir), 'No such file or directory: ' + args.test_set_dir
if not os.path.isdir(os.path.join(args.output_dir,'input_crops')):
os.makedirs(os.path.join(args.output_dir,'input_crops'))
if not os.path.isdir(os.path.join(args.output_dir,'tracking_predictions')):
os.makedirs(os.path.join(args.output_dir,'tracking_predictions'))

>>>>>>> 688bc95bb88028284a86f389a4e204f93b8d0f83:deepsea/test_tracker.py
test(args)
3 changes: 1 addition & 2 deletions deepsea/measure_MOTA.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@ def main(args,seg_img_size= [383,512],tracking_image_size = [128,128],image_mean

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
seg_transforms = segmentation_transforms.Compose([
segmentation_transforms.ToPILImage(),
segmentation_transforms.Resize(seg_img_size),
segmentation_transforms.Grayscale(num_output_channels=1),
segmentation_transforms.ToTensor(),
segmentation_transforms.Normalize(mean = image_means,
std = image_stds)
Expand Down Expand Up @@ -99,7 +99,6 @@ def main(args,seg_img_size= [383,512],tracking_image_size = [128,128],image_mean
ap.add_argument("--output_dir", required=True, type=str, help="path for saving the test outputs")

args = ap.parse_args()

assert os.path.isdir(args.single_test_set_dir), 'No such file or directory: ' + args.single_test_set_dir

if os.path.isdir(args.output_dir):
Expand Down
Loading

0 comments on commit d02dc2d

Please sign in to comment.