Bug fix/export anisotropic annotations #64

Merged
2 changes: 1 addition & 1 deletion annotationweb/static/annotationweb/annotationweb.js
@@ -569,4 +569,4 @@ function zoomAtMousePosition(mouseX, mouseY) {
// Have to convert imagedata to canvas for this to work
let background = g_context.getImageData(mouseX - 50, mouseY - 50, 100, 100);
g_context.drawImage(imageDataToCanvas(background), mouseX - 100, mouseY - 100, 200, 200);
}
}
2 changes: 1 addition & 1 deletion boundingbox/static/boundingbox/boundingbox.js
@@ -258,4 +258,4 @@ function redrawSequence() {
var index = g_currentFrameNr - g_startFrame;
g_context.drawImage(g_sequence[index], 0, 0, g_canvasWidth, g_canvasHeight);
redraw();
}
}
16 changes: 14 additions & 2 deletions common/metaimage.py
@@ -16,6 +16,7 @@ def __init__(self, filename=None, data=None, channels=False):
self.attributes = {}
self.attributes['ElementSpacing'] = [1, 1, 1]
self.attributes['ElementNumberOfChannels'] = 1
self.attributes['Offset'] = [0, 0]
if filename is not None:
self.read(filename)
else:
@@ -48,6 +49,8 @@ def read(self, filename):
self.attributes[parts[0].strip()] = parts[1].strip()
if parts[0].strip() == 'ElementSpacing':
self.attributes['ElementSpacing'] = [float(x) for x in self.attributes['ElementSpacing'].split()]
if parts[0].strip() == 'Offset':
self.attributes['Offset'] = [float(x) for x in self.attributes['Offset'].split()]

dims = self.attributes['DimSize'].split(' ')
if len(dims) == 2:
@@ -73,7 +76,7 @@ def read(self, filename):
# Read uncompressed raw file (.raw)
self.data = np.fromfile(os.path.join(base_path, self.attributes['ElementDataFile']), dtype=np.uint8)


# TODO: are L80-84 a duplicate of L55-59?
dims = self.attributes['DimSize'].split(' ')
if len(dims) == 2:
self.dim_size = (int(dims[0]), int(dims[1]))
@@ -114,6 +117,14 @@ def set_spacing(self, spacing):
def get_spacing(self):
return self.attributes['ElementSpacing']

def set_origin(self, origin):
if len(origin) != 2 and len(origin) != 3:
raise ValueError('Origin must have 2 or 3 components')
self.attributes['Offset'] = origin

def get_origin(self):
return self.attributes['Offset']

def get_metaimage_type(self):
np_type = self.data.dtype
if np_type == np.float32:
@@ -147,12 +158,13 @@ def write(self, filename, compress=False, compression_level=-1):
f.write('ElementType = ' + self.get_metaimage_type() + '\n')
f.write('ElementSpacing = ' + tuple_to_string(self.attributes['ElementSpacing']) + '\n')
f.write('ElementNumberOfChannels = ' + str(self.attributes['ElementNumberOfChannels']) + '\n')
f.write('Offset = ' + tuple_to_string(self.attributes['Offset']) + '\n')
if compress:
compressed_raw_data = zlib.compress(raw_data, compression_level)
f.write('CompressedData = True\n')
f.write('CompressedDataSize = ' + str(len(compressed_raw_data)) + '\n')
for key, value in self.attributes.items():
if key not in ['NDims', 'DimSize', 'ElementType', 'ElementDataFile', 'CompressedData', 'CompressedDataSize', 'ElementSpacing', 'ElementNumberOfChannels']:
if key not in ['NDims', 'DimSize', 'ElementType', 'ElementDataFile', 'CompressedData', 'CompressedDataSize', 'ElementSpacing', 'ElementNumberOfChannels', 'Offset']:
f.write(key + ' = ' + value + '\n')
f.write('ElementDataFile = ' + raw_filename + '\n')

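As a quick illustration of the new origin support, here is a minimal sketch (not part of the diff) that writes and re-reads the 'Offset' attribute via MetaImage. It assumes the module is importable as common.metaimage; the temporary file path and the spacing/origin values are made up for the example.

import numpy as np
from common.metaimage import MetaImage

# Build a small dummy segmentation and attach anisotropic spacing plus an origin.
segmentation = np.zeros((128, 128), dtype=np.uint8)
mhd = MetaImage(data=segmentation)
mhd.set_spacing([0.5, 1.0])            # example anisotropic pixel spacing
mhd.set_origin([10.0, -4.5])           # stored under the 'Offset' attribute
mhd.write('/tmp/segmentation.mhd')     # header should now contain an 'Offset = ...' line

# read() now parses 'Offset', so the origin should survive a round trip.
reloaded = MetaImage(filename='/tmp/segmentation.mhd')
print(reloaded.get_origin())           # expected: [10.0, -4.5]
print(reloaded.get_spacing())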
187 changes: 117 additions & 70 deletions exporters/spline_segmentation_exporter.py
@@ -163,68 +163,10 @@ def get_object_segmentation(self, image_size, frame):
for label in labels:
objects = ControlPoint.objects.filter(label=label, image=frame).only('object').distinct()
for object in objects:
previous_x = None
previous_y = None
xy = []
control_points = ControlPoint.objects.filter(label=label, image=frame, object=object.object).order_by('index')
max_index = len(control_points)
for i in range(max_index):
if i == 0:
first = max_index-1
else:
first = i-1
a = control_points[first]
b = control_points[i]
c = control_points[(i+1) % max_index]
d = control_points[(i+2) % max_index]
length = sqrt((b.x - c.x)*(b.x - c.x) + (b.y - c.y)*(b.y - c.y))
# Not a very elegant solution ... could try to estimate the spline length instead
# or draw straight lines between consecutive points instead
step_size = min(0.01, 1.0 / (length*2))
for t in np.arange(0, 1, step_size):
x = (2 * t * t * t - 3 * t * t + 1) * b.x + \
(1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.x - a.x) + \
(-2 * t * t * t + 3 * t * t) * c.x + \
(1 - tension) * (t * t * t - t * t) * (d.x - b.x)
y = (2 * t * t * t - 3 * t * t + 1) * b.y + \
(1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.y - a.y) + \
(-2 * t * t * t + 3 * t * t) * c.y + \
(1 - tension) * (t * t * t - t * t) * (d.y - b.y)

# Round and snap to borders
x = int(round(x))
x = min(image_size[1]-1, max(0, x))
y = int(round(y))
y = min(image_size[0]-1, max(0, y))

if previous_x is not None and (abs(previous_x - x) > 1 or abs(previous_y - y) > 1):
# Draw a straight line between the points
end_pos = np.array([x,y])
start_pos = np.array([previous_x,previous_y])
direction = end_pos - start_pos
segment_length = np.linalg.norm(end_pos - start_pos)
direction = direction / segment_length # Normalize
for i in np.arange(0.0, np.ceil(segment_length), 0.5):
current = start_pos + direction * (float(i)/np.ceil(segment_length))
current = np.round(current).astype(np.int32)
current[0] = min(image_size[1]-1, max(0, current[0]))
current[1] = min(image_size[0]-1, max(0, current[1]))
segmentation[current[1], current[0]] = counter

previous_x = x
previous_y = y

xy.append(previous_x)
xy.append(previous_y)

segmentation[y, x] = counter

xy_new = [xy[j:j + 2] for j in range(0, len(xy), 2)]

if i == max_index-1 and xy_new_temp != xy_new:
coordinates.append(xy_new)
coordinates.append(a.label)
xy_new_temp = xy_new
control_points = ControlPoint.objects.filter(label=label, image=frame, object=object.object).order_by(
'index')
segmentation, (coordinates, xy_new_temp) = self.draw_segmentation(image_size, control_points, counter,
segmentation, coordinates, xy_new_temp)

# Fill the hole
segmentation[binary_fill_holes(segmentation == counter)] = counter
@@ -233,21 +175,124 @@ def save_segmentation(self, frame, image_size, filename, spacing, json_annotations):

return segmentation, coordinates

@staticmethod
def draw_segmentation(image_size, control_points, label: int = 1, canvas: np.ndarray = None,
ret_coordinates: list = [], xy_new_temp = 0, tension: float = 0.5):
if canvas is None:
canvas = np.zeros(image_size, dtype=np.uint8)

previous_x = None
previous_y = None
xy = []
coordinates = []
max_index = len(control_points)
for i in range(max_index):
if i == 0:
first = max_index - 1
else:
first = i - 1
a = control_points[first]
b = control_points[i]
c = control_points[(i + 1) % max_index]
d = control_points[(i + 2) % max_index]
length = sqrt((b.x - c.x) * (b.x - c.x) + (b.y - c.y) * (b.y - c.y))
# Not a very elegant solution ... could try to estimate the spline length instead
# or draw straight lines between consecutive points instead
step_size = min(0.01, 1.0 / (length * 2))
for t in np.arange(0, 1, step_size):
x = (2 * t * t * t - 3 * t * t + 1) * b.x + \
(1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.x - a.x) + \
(-2 * t * t * t + 3 * t * t) * c.x + \
(1 - tension) * (t * t * t - t * t) * (d.x - b.x)
y = (2 * t * t * t - 3 * t * t + 1) * b.y + \
(1 - tension) * (t * t * t - 2.0 * t * t + t) * (c.y - a.y) + \
(-2 * t * t * t + 3 * t * t) * c.y + \
(1 - tension) * (t * t * t - t * t) * (d.y - b.y)

# Round and snap to borders
x = int(round(x))
x = min(image_size[1] - 1, max(0, x))
y = int(round(y))
y = min(image_size[0] - 1, max(0, y))

if previous_x is not None and (abs(previous_x - x) > 1 or abs(previous_y - y) > 1):
# Draw a straight line between the points
end_pos = np.array([x, y])
start_pos = np.array([previous_x, previous_y])
direction = end_pos - start_pos
segment_length = np.linalg.norm(end_pos - start_pos)
direction = direction / segment_length # Normalize
for i in np.arange(0.0, np.ceil(segment_length), 0.5):
current = start_pos + direction * (float(i) / np.ceil(segment_length))
current = np.round(current).astype(np.int32)
current[0] = min(image_size[1] - 1, max(0, current[0]))
current[1] = min(image_size[0] - 1, max(0, current[1]))
canvas[current[1], current[0]] = label

previous_x = x
previous_y = y

xy.append(previous_x)
xy.append(previous_y)

canvas[y, x] = label

xy_new = [xy[j:j + 2] for j in range(0, len(xy), 2)]

if i == max_index - 1 and xy_new_temp != xy_new:
ret_coordinates.append(xy_new)
ret_coordinates.append(a.label)
xy_new_temp = xy_new
return canvas, (ret_coordinates, xy_new_temp)

@staticmethod
def compute_scaling(image_size, spacing):
if len(spacing) == 2:
aspect_ratio = image_size[0] / image_size[1]
new_aspect_ratio = image_size[0] * spacing[0] / (image_size[1] * spacing[1])
scale = new_aspect_ratio / aspect_ratio
pixel_scaling = np.divide(image_size, np.multiply(image_size, scale).astype(int))
else:
raise NotImplementedError('3D segmentations not implemented yet')
return pixel_scaling

def save_segmentation(self, frame, image_size, filename, spacing, json_annotations):
image_size = [image_size[1], image_size[0]]

# Create compounded segmentation object
segmentation, coords = self.get_object_segmentation(image_size, frame)
if np.any(spacing != 1):
print('Anisotropic image detected')
segmentation = np.zeros(image_size, dtype=np.uint8)
labels = Label.objects.filter(task=frame.image_annotation.task).order_by('id')
scaling = self.compute_scaling(image_size, spacing)
# TODO: NotImplementedError will be triggered if we are dealing with 3D data
coords = []
xy_new_temp = 0
for label, label_id in enumerate(labels):
objects = ControlPoint.objects.filter(label=label_id, image=frame).only('object').distinct()
for object in objects:
control_points = ControlPoint.objects.filter(label=label_id, image=frame,
object=object.object).order_by('index')
for point in control_points:
point.x *= scaling[0]
# Update segmentation
object_segmentation, (coords, xy_new_temp) = self.draw_segmentation(image_size, control_points,
ret_coordinates=coords,
xy_new_temp=xy_new_temp)
object_segmentation[binary_fill_holes(object_segmentation == 1)] = 1
segmentation[object_segmentation == 1] = label + 1
else:
# Create compounded segmentation object
segmentation, coords = self.get_object_segmentation(image_size, frame)

image_filename = frame.image_annotation.image.format.replace('#', str(frame.frame_nr))
if image_filename.endswith('.mhd'):
image_mhd = MetaImage(filename=image_filename)
image_array = image_mhd.get_pixel_data()
else:
image_pil = PIL.Image.open(image_filename)
image_array = np.asarray(image_pil)
if json_annotations:
image_filename = frame.image_annotation.image.format.replace('#', str(frame.frame_nr))
if image_filename.endswith('.mhd'):
image_mhd = MetaImage(filename=image_filename)
image_array = image_mhd.get_pixel_data()
else:
image_pil = PIL.Image.open(image_filename)
image_array = np.asarray(image_pil)
image_data = img_arr_to_b64(image_array)
json_dict = create_json(coords, image_size, filename, image_data)
with open(filename[:-7] + '.json', "w") as f:
@@ -258,6 +303,8 @@ def save_segmentation(self, frame, image_size, filename, spacing, json_annotations):
else:
segmentation_mhd = MetaImage(data=segmentation)
segmentation_mhd.set_attribute('ImageQuality', frame.image_annotation.image_quality)
if 'image_mhd' in locals():
segmentation_mhd.set_attribute('Offset', image_mhd.get_origin())
segmentation_mhd.set_spacing(spacing)
metadata = ImageMetadata.objects.filter(image=frame.image_annotation.image)
for item in metadata:
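To make the anisotropic scaling step concrete, here is a small standalone sketch (not part of the diff) that reproduces the 2D arithmetic of compute_scaling with illustrative numbers; the image size and spacing values are assumptions chosen for the example.

import numpy as np

def compute_scaling(image_size, spacing):
    # Same 2D arithmetic as the exporter's compute_scaling above.
    aspect_ratio = image_size[0] / image_size[1]
    new_aspect_ratio = image_size[0] * spacing[0] / (image_size[1] * spacing[1])
    scale = new_aspect_ratio / aspect_ratio           # reduces to spacing[0] / spacing[1]
    return np.divide(image_size, np.multiply(image_size, scale).astype(int))

# Illustrative values only: a 400x600 segmentation with 2:1 anisotropic spacing.
print(compute_scaling([400, 600], [0.2, 0.1]))        # -> [0.5 0.5]

In save_segmentation above, the first component of this result is what multiplies each control point's x coordinate before draw_segmentation rasterizes the spline.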