diff --git a/armory/__init__.py b/armory/__init__.py index d895da076..76ef29ac4 100644 --- a/armory/__init__.py +++ b/armory/__init__.py @@ -8,7 +8,7 @@ # Semantic Version -__version__ = "0.13.1" +__version__ = "0.13.2" # Submodule imports diff --git a/armory/baseline_models/tf_graph/mscoco_frcnn.py b/armory/baseline_models/tf_graph/mscoco_frcnn.py index dd059de5f..e5b7098ce 100644 --- a/armory/baseline_models/tf_graph/mscoco_frcnn.py +++ b/armory/baseline_models/tf_graph/mscoco_frcnn.py @@ -9,63 +9,29 @@ import tensorflow as tf -class TensorFlowFasterRCNNOneIndexed(TensorFlowFasterRCNN): - """ - This is an MSCOCO pre-trained model. Note that the inherited TensorFlowFasterRCMM class - outputs 0-indexed classes, while this wrapper class outputs 1-indexed classes. A label map can be found at - https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt - - This model only performs inference and is not trainable. To train - or fine-tune this model, please follow instructions at - https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1.md - """ - - def __init__(self, images): - super().__init__( - images, - model=None, - filename="faster_rcnn_resnet50_coco_2018_01_28", - url="http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz", - sess=None, - is_training=False, - clip_values=(0, 1), - channels_first=False, - preprocessing_defences=None, - postprocessing_defences=None, - attack_losses=( - "Loss/RPNLoss/localization_loss", - "Loss/RPNLoss/objectness_loss", - "Loss/BoxClassifierLoss/localization_loss", - "Loss/BoxClassifierLoss/classification_loss", - ), - ) - - def compute_loss(self, x, y): - raise NotImplementedError - - def loss_gradient(self, x, y, **kwargs): - y_zero_indexed = [] - for y_dict in y: - y_dict_zero_indexed = y_dict.copy() - y_dict_zero_indexed["labels"] = y_dict_zero_indexed["labels"] - 1 - y_zero_indexed.append(y_dict_zero_indexed) - return super().loss_gradient(x, y_zero_indexed, **kwargs) - - def predict(self, x, **kwargs): - list_of_zero_indexed_pred_dicts = super().predict(x, **kwargs) - list_of_one_indexed_pred_dicts = [] - for img_pred_dict in list_of_zero_indexed_pred_dicts: - zero_indexed_pred_labels = img_pred_dict["labels"] - img_pred_dict["labels"] = zero_indexed_pred_labels + 1 - list_of_one_indexed_pred_dicts.append(img_pred_dict) - return list_of_one_indexed_pred_dicts - - def get_art_model(model_kwargs, wrapper_kwargs, weights_file=None): # APRICOT inputs should have shape (1, None, None, 3) while DAPRICOT inputs have shape # (3, None, None, 3) images = tf.placeholder( tf.float32, shape=(model_kwargs.get("batch_size", 1), None, None, 3) ) - model = TensorFlowFasterRCNNOneIndexed(images) + model = TensorFlowFasterRCNN( + images, + model=None, + filename="faster_rcnn_resnet50_coco_2018_01_28", + url="http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz", + sess=None, + is_training=False, + clip_values=(0, 1), + channels_first=False, + preprocessing_defences=None, + postprocessing_defences=None, + attack_losses=( + "Loss/RPNLoss/localization_loss", + "Loss/RPNLoss/objectness_loss", + "Loss/BoxClassifierLoss/localization_loss", + "Loss/BoxClassifierLoss/classification_loss", + ), + ) + return model diff --git a/armory/data/adversarial/apricot_metadata.py b/armory/data/adversarial/apricot_metadata.py index 094411a82..74ccc9f9b 100644 --- a/armory/data/adversarial/apricot_metadata.py +++ 
b/armory/data/adversarial/apricot_metadata.py @@ -22,7 +22,7 @@ APRICOT_PATCHES = { 0: { "adv_model": 0, - "adv_target": 53, + "adv_target": 52, "id": 0, "is_circle": True, "is_square": False, @@ -30,7 +30,7 @@ }, 1: { "adv_model": 0, - "adv_target": 27, + "adv_target": 26, "id": 1, "is_circle": True, "is_square": False, @@ -38,7 +38,7 @@ }, 2: { "adv_model": 0, - "adv_target": 44, + "adv_target": 43, "id": 2, "is_circle": True, "is_square": False, @@ -46,7 +46,7 @@ }, 3: { "adv_model": 0, - "adv_target": 17, + "adv_target": 16, "id": 3, "is_circle": True, "is_square": False, @@ -54,7 +54,7 @@ }, 4: { "adv_model": 0, - "adv_target": 85, + "adv_target": 84, "id": 4, "is_circle": True, "is_square": False, @@ -62,7 +62,7 @@ }, 5: { "adv_model": 0, - "adv_target": 73, + "adv_target": 72, "id": 5, "is_circle": True, "is_square": False, @@ -70,7 +70,7 @@ }, 6: { "adv_model": 0, - "adv_target": 78, + "adv_target": 77, "id": 6, "is_circle": True, "is_square": False, @@ -78,7 +78,7 @@ }, 7: { "adv_model": 0, - "adv_target": 1, + "adv_target": 0, "id": 7, "is_circle": True, "is_square": False, @@ -86,7 +86,7 @@ }, 8: { "adv_model": 0, - "adv_target": 64, + "adv_target": 63, "id": 8, "is_circle": True, "is_square": False, @@ -94,7 +94,7 @@ }, 9: { "adv_model": 0, - "adv_target": 33, + "adv_target": 32, "id": 9, "is_circle": True, "is_square": False, @@ -102,7 +102,7 @@ }, 10: { "adv_model": 0, - "adv_target": 53, + "adv_target": 52, "id": 10, "is_circle": False, "is_square": True, @@ -110,7 +110,7 @@ }, 11: { "adv_model": 0, - "adv_target": 27, + "adv_target": 26, "id": 11, "is_circle": False, "is_square": True, @@ -118,7 +118,7 @@ }, 12: { "adv_model": 0, - "adv_target": 44, + "adv_target": 43, "id": 12, "is_circle": False, "is_square": True, @@ -126,7 +126,7 @@ }, 13: { "adv_model": 0, - "adv_target": 17, + "adv_target": 16, "id": 13, "is_circle": False, "is_square": True, @@ -134,7 +134,7 @@ }, 14: { "adv_model": 0, - "adv_target": 85, + "adv_target": 84, "id": 14, "is_circle": False, "is_square": True, @@ -142,7 +142,7 @@ }, 15: { "adv_model": 0, - "adv_target": 73, + "adv_target": 72, "id": 15, "is_circle": False, "is_square": True, @@ -150,7 +150,7 @@ }, 16: { "adv_model": 0, - "adv_target": 78, + "adv_target": 77, "id": 16, "is_circle": False, "is_square": True, @@ -158,7 +158,7 @@ }, 17: { "adv_model": 0, - "adv_target": 1, + "adv_target": 0, "id": 17, "is_circle": False, "is_square": True, @@ -166,7 +166,7 @@ }, 18: { "adv_model": 0, - "adv_target": 64, + "adv_target": 63, "id": 18, "is_circle": False, "is_square": True, @@ -174,7 +174,7 @@ }, 19: { "adv_model": 0, - "adv_target": 33, + "adv_target": 32, "id": 19, "is_circle": False, "is_square": True, @@ -182,7 +182,7 @@ }, 20: { "adv_model": 1, - "adv_target": 53, + "adv_target": 52, "id": 20, "is_circle": True, "is_square": False, @@ -190,7 +190,7 @@ }, 21: { "adv_model": 1, - "adv_target": 27, + "adv_target": 26, "id": 21, "is_circle": True, "is_square": False, @@ -198,7 +198,7 @@ }, 22: { "adv_model": 1, - "adv_target": 44, + "adv_target": 43, "id": 22, "is_circle": True, "is_square": False, @@ -206,7 +206,7 @@ }, 23: { "adv_model": 1, - "adv_target": 17, + "adv_target": 16, "id": 23, "is_circle": True, "is_square": False, @@ -214,7 +214,7 @@ }, 24: { "adv_model": 1, - "adv_target": 85, + "adv_target": 84, "id": 24, "is_circle": True, "is_square": False, @@ -222,7 +222,7 @@ }, 25: { "adv_model": 1, - "adv_target": 73, + "adv_target": 72, "id": 25, "is_circle": True, "is_square": False, @@ -230,7 +230,7 @@ }, 26: { "adv_model": 1, - 
"adv_target": 78, + "adv_target": 77, "id": 26, "is_circle": True, "is_square": False, @@ -238,7 +238,7 @@ }, 27: { "adv_model": 1, - "adv_target": 1, + "adv_target": 0, "id": 27, "is_circle": True, "is_square": False, @@ -246,7 +246,7 @@ }, 28: { "adv_model": 1, - "adv_target": 64, + "adv_target": 63, "id": 28, "is_circle": True, "is_square": False, @@ -254,7 +254,7 @@ }, 29: { "adv_model": 1, - "adv_target": 33, + "adv_target": 32, "id": 29, "is_circle": True, "is_square": False, @@ -262,7 +262,7 @@ }, 30: { "adv_model": 1, - "adv_target": 53, + "adv_target": 52, "id": 30, "is_circle": False, "is_square": True, @@ -270,7 +270,7 @@ }, 31: { "adv_model": 1, - "adv_target": 27, + "adv_target": 26, "id": 31, "is_circle": False, "is_square": True, @@ -278,7 +278,7 @@ }, 32: { "adv_model": 1, - "adv_target": 44, + "adv_target": 43, "id": 32, "is_circle": False, "is_square": True, @@ -286,7 +286,7 @@ }, 33: { "adv_model": 1, - "adv_target": 17, + "adv_target": 16, "id": 33, "is_circle": False, "is_square": True, @@ -294,7 +294,7 @@ }, 34: { "adv_model": 1, - "adv_target": 85, + "adv_target": 84, "id": 34, "is_circle": False, "is_square": True, @@ -302,7 +302,7 @@ }, 35: { "adv_model": 1, - "adv_target": 73, + "adv_target": 72, "id": 35, "is_circle": False, "is_square": True, @@ -310,7 +310,7 @@ }, 36: { "adv_model": 1, - "adv_target": 78, + "adv_target": 77, "id": 36, "is_circle": False, "is_square": True, @@ -318,7 +318,7 @@ }, 37: { "adv_model": 1, - "adv_target": 1, + "adv_target": 0, "id": 37, "is_circle": False, "is_square": True, @@ -326,7 +326,7 @@ }, 38: { "adv_model": 1, - "adv_target": 64, + "adv_target": 63, "id": 38, "is_circle": False, "is_square": True, @@ -334,7 +334,7 @@ }, 39: { "adv_model": 1, - "adv_target": 33, + "adv_target": 32, "id": 39, "is_circle": False, "is_square": True, @@ -342,7 +342,7 @@ }, 40: { "adv_model": 2, - "adv_target": 53, + "adv_target": 52, "id": 40, "is_circle": True, "is_square": False, @@ -350,7 +350,7 @@ }, 41: { "adv_model": 2, - "adv_target": 27, + "adv_target": 26, "id": 41, "is_circle": True, "is_square": False, @@ -358,7 +358,7 @@ }, 42: { "adv_model": 2, - "adv_target": 44, + "adv_target": 43, "id": 42, "is_circle": True, "is_square": False, @@ -366,7 +366,7 @@ }, 43: { "adv_model": 2, - "adv_target": 17, + "adv_target": 16, "id": 43, "is_circle": True, "is_square": False, @@ -374,7 +374,7 @@ }, 44: { "adv_model": 2, - "adv_target": 85, + "adv_target": 84, "id": 44, "is_circle": True, "is_square": False, @@ -382,7 +382,7 @@ }, 45: { "adv_model": 2, - "adv_target": 73, + "adv_target": 72, "id": 45, "is_circle": True, "is_square": False, @@ -390,7 +390,7 @@ }, 46: { "adv_model": 2, - "adv_target": 78, + "adv_target": 77, "id": 46, "is_circle": True, "is_square": False, @@ -398,7 +398,7 @@ }, 47: { "adv_model": 2, - "adv_target": 1, + "adv_target": 0, "id": 47, "is_circle": True, "is_square": False, @@ -406,7 +406,7 @@ }, 48: { "adv_model": 2, - "adv_target": 64, + "adv_target": 63, "id": 48, "is_circle": True, "is_square": False, @@ -414,7 +414,7 @@ }, 49: { "adv_model": 2, - "adv_target": 33, + "adv_target": 32, "id": 49, "is_circle": True, "is_square": False, @@ -422,7 +422,7 @@ }, 50: { "adv_model": 2, - "adv_target": 53, + "adv_target": 52, "id": 50, "is_circle": False, "is_square": True, @@ -430,7 +430,7 @@ }, 51: { "adv_model": 2, - "adv_target": 27, + "adv_target": 26, "id": 51, "is_circle": False, "is_square": True, @@ -438,7 +438,7 @@ }, 52: { "adv_model": 2, - "adv_target": 44, + "adv_target": 43, "id": 52, "is_circle": False, 
"is_square": True, @@ -446,7 +446,7 @@ }, 53: { "adv_model": 2, - "adv_target": 17, + "adv_target": 16, "id": 53, "is_circle": False, "is_square": True, @@ -454,7 +454,7 @@ }, 54: { "adv_model": 2, - "adv_target": 85, + "adv_target": 84, "id": 54, "is_circle": False, "is_square": True, @@ -462,7 +462,7 @@ }, 55: { "adv_model": 2, - "adv_target": 73, + "adv_target": 72, "id": 55, "is_circle": False, "is_square": True, @@ -470,7 +470,7 @@ }, 56: { "adv_model": 2, - "adv_target": 78, + "adv_target": 77, "id": 56, "is_circle": False, "is_square": True, @@ -478,7 +478,7 @@ }, 57: { "adv_model": 2, - "adv_target": 1, + "adv_target": 0, "id": 57, "is_circle": False, "is_square": True, @@ -486,7 +486,7 @@ }, 58: { "adv_model": 2, - "adv_target": 64, + "adv_target": 63, "id": 58, "is_circle": False, "is_square": True, @@ -494,7 +494,7 @@ }, 59: { "adv_model": 2, - "adv_target": 33, + "adv_target": 32, "id": 59, "is_circle": False, "is_square": True, diff --git a/armory/data/adversarial/dapricot_test.py b/armory/data/adversarial/dapricot_test.py new file mode 100644 index 000000000..ee789e7ec --- /dev/null +++ b/armory/data/adversarial/dapricot_test.py @@ -0,0 +1,357 @@ +"""dapricot_test dataset.""" + +import collections +import json +import os +import pandas + +import tensorflow.compat.v1 as tf +import tensorflow_datasets as tfds + +_DESCRIPTION = """ +LEGAL +----- +Copyright 2021 The MITRE Corporation. All rights reserved. +""" + +_CITATION = """ +Dataset is unpublished at this time. +""" + +_URLS = "https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/dapricot_test.tar.gz" + + +class DapricotTest(tfds.core.GeneratorBasedBuilder): + """DatasetBuilder for dapricot_test dataset.""" + + VERSION = tfds.core.Version("1.0.0") + RELEASE_NOTES = { + "1.0.0": "Initial release.", + } + + def _info(self) -> tfds.core.DatasetInfo: + """Returns the dataset metadata.""" + features = { + # all Sequences are for [camera_1, camera_2, camera_3] + "image": tfds.features.Sequence( + tfds.features.Image(shape=(None, None, 3)), # encoding_format="jpeg"), + length=3, + ), + "images": tfds.features.Sequence( + tfds.features.FeaturesDict( + { + "file_name": tfds.features.Text(), + "height": tf.int64, + "width": tf.int64, + "id": tf.int64, + } + ), + length=3, + ), + "categories": tfds.features.Sequence( + tfds.features.Sequence( + tfds.features.FeaturesDict( + { + "id": tf.int64, # {'octagon':12, 'diamond':26, 'rect':29} + "name": tfds.features.Text(), + } + ) + ), + length=3, + ), + "objects": tfds.features.Sequence( + tfds.features.Sequence( + { + "id": tf.int64, + "image_id": tf.int64, + "area": tf.int64, # un-normalized area + "boxes": tfds.features.BBoxFeature(), # normalized bounding box [ymin, xmin, ymax, xmax] + "labels": tfds.features.ClassLabel(num_classes=91), + "is_crowd": tf.bool, + } + ), + length=3, + ), + "patch_metadata": tfds.features.Sequence( + # these data only apply to the "green screen patch" objects + tfds.features.FeaturesDict( + { + "gs_coords": tfds.features.Sequence( + tfds.features.Tensor( + shape=[2], dtype=tf.int64 + ), # green screen vertices in (x,y) + ), + "cc_ground_truth": tfds.features.Tensor( + shape=[24, 3], dtype=tf.float32 + ), # colorchecker color ground truth + "cc_scene": tfds.features.Tensor( + shape=[24, 3], dtype=tf.float32 + ), # colorchecker colors in a scene + "shape": tfds.features.Text(), # "diamond", "rect", "octagon" + } + ), + length=3, + ), + } + + return tfds.core.DatasetInfo( + builder=self, + description=_DESCRIPTION, + 
features=tfds.features.FeaturesDict(features), + citation=_CITATION, + ) + + def _split_generators(self, dl_manager: tfds.download.DownloadManager): + """Returns SplitGenerators.""" + paths = dl_manager.download_and_extract(_URLS) + return [ + tfds.core.SplitGenerator( + name=patch_size, + gen_kwargs={"path": os.path.join(paths, "test"), "size": patch_size}, + ) + for patch_size in ["large", "medium", "small"] + ] + + def _generate_examples(self, path, size): + """yield examples""" + + scenes = [ + "02", + "03", + "04", + "05", + "07", + "08", + "09", + "10", + "11", + "12", + "13", + "15", + ] + + size_dist = {"small": "dist15", "medium": "dist10", "large": "dist5"} + + yield_id = 0 + # For each scene, read JSONs for all cameras. + # For each camera, go through each image. + # For each image, gets its annotations and yield relevant data + for scene in scenes: + + annotation_path_camera_1 = os.path.join( + path, "annotations/labels_scene_{}_camera_1.json".format(scene) + ) + annotation_path_camera_2 = os.path.join( + path, "annotations/labels_scene_{}_camera_2.json".format(scene) + ) + annotation_path_camera_3 = os.path.join( + path, "annotations/labels_scene_{}_camera_3.json".format(scene) + ) + + dapricot_camera_1 = DapricotAnnotation(annotation_path_camera_1) + dapricot_camera_2 = DapricotAnnotation(annotation_path_camera_2) + dapricot_camera_3 = DapricotAnnotation(annotation_path_camera_3) + + images_camera_1 = dapricot_camera_1.images() + images_camera_2 = dapricot_camera_2.images() + images_camera_3 = dapricot_camera_3.images() + + # sort images alphabetically so all three cameras are consistent + images_camera_1 = sorted( + images_camera_1, key=lambda x: x["file_name"].lower() + ) + images_camera_2 = sorted( + images_camera_2, key=lambda x: x["file_name"].lower() + ) + images_camera_3 = sorted( + images_camera_3, key=lambda x: x["file_name"].lower() + ) + + for image_camera_1, image_camera_2, image_camera_3 in zip( + images_camera_1, images_camera_2, images_camera_3 + ): + + # verify consistency + fname1 = image_camera_1[ + "file_name" + ] # fname has format "scene_#_camera_1___.JPG" + fname2 = image_camera_2["file_name"] + fname3 = image_camera_3["file_name"] + assert fname1 == ("_").join( + fname2.split("_")[:3] + ["1"] + fname2.split("_")[4:] + ), "{} and {} are inconsistent".format(fname1, fname2) + assert fname1 == ("_").join( + fname3.split("_")[:3] + ["1"] + fname3.split("_")[4:] + ), "{} and {} are inconsistent".format(fname1, fname3) + + # get object annotations for each image + annotations_camera_1 = dapricot_camera_1.get_annotations( + image_camera_1["id"] + ) + annotations_camera_2 = dapricot_camera_2.get_annotations( + image_camera_2["id"] + ) + annotations_camera_3 = dapricot_camera_3.get_annotations( + image_camera_3["id"] + ) + + # convert bbox to Pytorch format + def build_bbox(x, y, width, height): + return tfds.features.BBox( + ymin=y + / image_camera_1[ + "height" + ], # all images are the same size, so using image_camera_1 is fine + xmin=x / image_camera_1["width"], + ymax=(y + height) / image_camera_1["height"], + xmax=(x + width) / image_camera_1["width"], + ) + + # convert segmentation format of (x0,y0,x1,y1,...) to ( (x0, y0), (x1, y1), ... 
) + def build_coords(segmentation): + xs = segmentation[::2] + ys = segmentation[1::2] + coords = [[int(round(x)), int(round(y))] for (x, y) in zip(xs, ys)] + + return coords + + # convert green screen shape given in file name to shape expected in downstream algorithms + def get_shape(in_shape): + out_shape = {"stp": "octagon", "pxg": "diamond", "spd": "rect"} + return out_shape[in_shape] + + # get colorchecker color box values. There are 24 color boxes, so output shape is (24, 3) + def get_cc(ground_truth=True, scene=None, camera=None): + if ground_truth: + return ( + pandas.read_csv( + os.path.join( + path, + "annotations", + "xrite_passport_colors_sRGB-GMB-2005.csv", + ), + header=None, + ) + .to_numpy() + .astype("float32") + ) + else: + return ( + pandas.read_csv( + os.path.join( + path, + "annotations", + "scene_{}_camera_{}_CC_values.csv".format( + scene, camera + ), + ), + header=None, + ) + .to_numpy() + .astype("float32") + ) + + example = { + "image": [ + os.path.join( + path, + "scene_{}/camera_{}".format(scene, camera + 1), + im_cam["file_name"], + ) + for camera, im_cam in enumerate( + [image_camera_1, image_camera_2, image_camera_3] + ) + ], + "images": [image_camera_1, image_camera_2, image_camera_3], + "categories": [ + d_cam.categories() + for d_cam in [ + dapricot_camera_1, + dapricot_camera_2, + dapricot_camera_3, + ] + ], + "objects": [ + [ + { + "id": anno["id"], + "image_id": anno["image_id"], + "area": anno["area"], + "boxes": build_bbox(*anno["bbox"]), + "labels": anno["category_id"], + "is_crowd": bool(anno["iscrowd"]), + } + for anno in annos + ] + for annos in [ + annotations_camera_1, + annotations_camera_2, + annotations_camera_3, + ] + ], + "patch_metadata": [ + [ + { + "gs_coords": build_coords(*anno["segmentation"]), + "cc_ground_truth": get_cc(), + "cc_scene": get_cc( + ground_truth=False, scene=scene, camera=camera + 1 + ), + "shape": get_shape( + im_info["file_name"].split("_")[4].lower() + ), # file_name has format "scene_#_camera_#___.JPG" + } + for anno in annos + if len(anno["segmentation"]) > 0 + ][0] + for camera, (annos, im_info) in enumerate( + zip( + [ + annotations_camera_1, + annotations_camera_2, + annotations_camera_3, + ], + [image_camera_1, image_camera_2, image_camera_3], + ) + ) + ], + } + + yield_id = yield_id + 1 + + patch_size = image_camera_1["file_name"].split(".")[ + 0 + ] # scene_#_camera_#___ + patch_size = patch_size.split("_")[-1].lower() # + if size_dist[size] == patch_size: + yield yield_id, example + + +class DapricotAnnotation(object): + """Dapricot annotation helper class.""" + + def __init__(self, annotation_path): + with tf.io.gfile.GFile(annotation_path) as f: + data = json.load(f) + self._data = data + + # for each images["id"], find all annotations such that annotations["image_id"] == images["id"] + img_id2annotations = collections.defaultdict(list) + for a in self._data["annotations"]: + img_id2annotations[a["image_id"]].append(a) + self._img_id2annotations = { + k: list(sorted(v, key=lambda a: a["id"])) + for k, v in img_id2annotations.items() + } + + def categories(self): + """Return the category dicts, as sorted in the file.""" + return self._data["categories"] + + def images(self): + """Return the image dicts, as sorted in the file.""" + return self._data["images"] + + def get_annotations(self, img_id): + """Return all annotations associated with the image id string.""" + return self._img_id2annotations.get(img_id, []) diff --git a/armory/data/adversarial_datasets.py b/armory/data/adversarial_datasets.py index 
dacb15d85..51db3566a 100644 --- a/armory/data/adversarial_datasets.py +++ b/armory/data/adversarial_datasets.py @@ -17,6 +17,7 @@ apricot_dev, apricot_test, dapricot_dev, + dapricot_test, ) @@ -321,11 +322,13 @@ def gtsrb_poison( def apricot_label_preprocessing(x, y): """ - Convert labels to list of dicts. If batch_size > 1, this will already be the case, - and y will simply be returned without modification. + Convert labels to list of dicts. If batch_size > 1, this will already be the case. + Decrement labels of non-patch objects by 1 to be 0-indexed """ if isinstance(y, dict): y = [y] + for y_dict in y: + y_dict["labels"] -= y_dict["labels"] != ADV_PATCH_MAGIC_NUMBER_LABEL_ID return y @@ -509,3 +512,37 @@ def dapricot_dev_adversarial( framework=framework, context=dapricot_adversarial_context, ) + + +def dapricot_test_adversarial( + split: str = "large+medium+small", + epochs: int = 1, + batch_size: int = 1, + dataset_dir: str = None, + preprocessing_fn: Callable = dapricot_canonical_preprocessing, + label_preprocessing_fn: Callable = dapricot_label_preprocessing, + cache_dataset: bool = True, + framework: str = "numpy", + shuffle_files: bool = False, +) -> datasets.ArmoryDataGenerator: + if batch_size != 1: + raise ValueError("D-APRICOT batch size must be set to 1") + + if split == "adversarial": + split = "small+medium+large" + + return datasets._generator_from_tfds( + "dapricot_test:1.0.0", + split=split, + batch_size=batch_size, + epochs=epochs, + dataset_dir=dataset_dir, + preprocessing_fn=preprocessing_fn, + label_preprocessing_fn=label_preprocessing_fn, + as_supervised=False, + supervised_xy_keys=("image", ("objects", "patch_metadata")), + shuffle_files=shuffle_files, + cache_dataset=cache_dataset, + framework=framework, + context=dapricot_adversarial_context, + ) diff --git a/armory/data/cached_s3_checksums/coco.txt b/armory/data/cached_s3_checksums/coco.txt new file mode 100644 index 000000000..fed135e87 --- /dev/null +++ b/armory/data/cached_s3_checksums/coco.txt @@ -0,0 +1 @@ +armory-public-data coco/coco_2017_1.1.0.tar.gz 26637020232 371cf143ba73767f6a5bdc7f39d9d7b5d61d59125e94cc2957de29430c727015 diff --git a/armory/data/cached_s3_checksums/dapricot_test.txt b/armory/data/cached_s3_checksums/dapricot_test.txt new file mode 100644 index 000000000..10409412a --- /dev/null +++ b/armory/data/cached_s3_checksums/dapricot_test.txt @@ -0,0 +1 @@ +armory-public-data adversarial-datasets/cached/dapricot_test_adversarial_1.0.0_cached.tar.gz 289432599 608d5b3b37fea7c2f2716cc92d7f227ec93ee222ed5006a677c930d221464666 diff --git a/armory/data/datasets.py b/armory/data/datasets.py index 5eefc0b3f..68a086590 100644 --- a/armory/data/datasets.py +++ b/armory/data/datasets.py @@ -676,6 +676,7 @@ def canonical_variable_image_preprocess(context, batch): resisc10_context = ImageContext(x_shape=(64, 64, 3)) imagenette_context = ImageContext(x_shape=(None, None, 3)) xview_context = ImageContext(x_shape=(None, None, 3)) +coco_context = ImageContext(x_shape=(None, None, 3)) ucf101_context = VideoContext(x_shape=(None, None, None, 3), frame_rate=25) @@ -711,6 +712,10 @@ def xview_canonical_preprocessing(batch): return canonical_variable_image_preprocess(xview_context, batch) +def coco_canonical_preprocessing(batch): + return canonical_variable_image_preprocess(coco_context, batch) + + def ucf101_canonical_preprocessing(batch): return canonical_variable_image_preprocess(ucf101_context, batch) @@ -1381,6 +1386,147 @@ def xview( ) +def coco_label_preprocessing(x, y): + """ + If batch_size is 1, this 
function converts the single y dictionary to a list of length 1. + This function converts COCO labels from a 0-79 range to the standard 0-89 with 10 unused indices + (see https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt). + The label map used matches the link above, with the note that labels start from 0 rather than 1. + """ + # This will be true only when batch_size is 1 + if isinstance(y, dict): + y = [y] + idx_map = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + 10: 10, + 11: 12, + 12: 13, + 13: 14, + 14: 15, + 15: 16, + 16: 17, + 17: 18, + 18: 19, + 19: 20, + 20: 21, + 21: 22, + 22: 23, + 23: 24, + 24: 26, + 25: 27, + 26: 30, + 27: 31, + 28: 32, + 29: 33, + 30: 34, + 31: 35, + 32: 36, + 33: 37, + 34: 38, + 35: 39, + 36: 40, + 37: 41, + 38: 42, + 39: 43, + 40: 45, + 41: 46, + 42: 47, + 43: 48, + 44: 49, + 45: 50, + 46: 51, + 47: 52, + 48: 53, + 49: 54, + 50: 55, + 51: 56, + 52: 57, + 53: 58, + 54: 59, + 55: 60, + 56: 61, + 57: 62, + 58: 63, + 59: 64, + 60: 66, + 61: 69, + 62: 71, + 63: 72, + 64: 73, + 65: 74, + 66: 75, + 67: 76, + 68: 77, + 69: 78, + 70: 79, + 71: 80, + 72: 81, + 73: 83, + 74: 84, + 75: 85, + 76: 86, + 77: 87, + 78: 88, + 79: 89, + } + for label_dict in y: + label_dict["boxes"] = label_dict.pop("bbox").reshape(-1, 4) + label_dict["labels"] = np.vectorize(idx_map.__getitem__)( + label_dict.pop("label").reshape(-1,) + ) + return y + + +def coco2017( + split: str = "train", + epochs: int = 1, + batch_size: int = 1, + dataset_dir: str = None, + preprocessing_fn: Callable = coco_canonical_preprocessing, + label_preprocessing_fn: Callable = coco_label_preprocessing, + fit_preprocessing_fn: Callable = None, + cache_dataset: bool = True, + framework: str = "numpy", + shuffle_files: bool = True, + **kwargs, +) -> ArmoryDataGenerator: + """ + split - one of ("train", "validation", "test") + + Note: images from the "test" split are not annotated. + """ + preprocessing_fn = preprocessing_chain(preprocessing_fn, fit_preprocessing_fn) + if "class_ids" in kwargs: + raise ValueError("Filtering by class is not supported for the coco2017 dataset") + return _generator_from_tfds( + "coco/2017:1.1.0", + split=split, + batch_size=batch_size, + epochs=epochs, + dataset_dir=dataset_dir, + preprocessing_fn=preprocessing_fn, + label_preprocessing_fn=label_preprocessing_fn, + as_supervised=False, + supervised_xy_keys=("image", "objects"), + variable_length=bool(batch_size > 1), + variable_y=bool(batch_size > 1), + cache_dataset=cache_dataset, + framework=framework, + shuffle_files=shuffle_files, + context=coco_context, + **kwargs, + ) + + class So2SatContext: def __init__(self): self.default_type = np.float32 diff --git a/armory/data/url_checksums/dapricot_test.txt b/armory/data/url_checksums/dapricot_test.txt new file mode 100755 index 000000000..a2d5cb4d1 --- /dev/null +++ b/armory/data/url_checksums/dapricot_test.txt @@ -0,0 +1 @@ +https://armory-public-data.s3.us-east-2.amazonaws.com/adversarial-datasets/dapricot_test.tar.gz 297987080 836b428043018ddcc5974dd46aa73c933641fa6d822dea91cfe46ed43f774276 diff --git a/docs/adversarial_datasets.md b/docs/adversarial_datasets.md index d4fc596a4..e93742465 100644 --- a/docs/adversarial_datasets.md +++ b/docs/adversarial_datasets.md @@ -2,13 +2,13 @@ The `armory.data.adversarial_datasets` module implements functionality to return adversarial datasets of various data modalities. 
By default, this is a NumPy `ArmoryDataGenerator` which -implements the methods needed by the ART framework. Specifically `get_batch` will -return a tuple of `((data_clean, data_adversarial), label_clean)` for a specified batch size in numpy format, -where 'data_clean' and 'label_clean' represent a clean example and its true label, and 'data_adversarial' -represents the corresponding adversarially attacked example. The lone exception to this is the [APRICOT](https://arxiv.org/abs/1912.08166) -dataset, which contains physical adversarial patches and returns a tuple of `(data_adversarial, label_adversarial)` for each batch. -Each adversarial dataset contains adversarial examples generated using one or more attacks. +implements the methods needed by the ART framework. +For most adversarial datasets, `get_batch()` returns a tuple of `((data_clean, data_adversarial), label_clean)` for a +specified batch size in numpy format, where `data_clean` and `label_clean` represent a clean example and its true +label, and `data_adversarial` represents the corresponding adversarially attacked example. The +[APRICOT](https://arxiv.org/abs/1912.08166) and DAPRICOT datasets differ in that `get_batch()` returns a tuple +of `(data_adversarial, label_adversarial)`. @@ -57,6 +57,7 @@ Note: the APRICOT dataset contains splits for ["frcnn", "ssd", "retinanet"] rath | "apricot_dev_adversarial" | ["adversarial", frcnn", "ssd", "retinanet"] * | Physical Adversarial Attacks on Object Detection| Targeted, universal patch | dev | (nb, variable_height, variable_width, 3) | uint8 | n/a | dict | 138 images | | "apricot_test_adversarial" | ["adversarial", frcnn", "ssd", "retinanet"] * | Physical Adversarial Attacks on Object Detection| Targeted, universal patch | test | (nb, variable_height, variable_width, 3) | uint8 | n/a | dict | 873 images | | "dapricot_dev_adversarial" | ["small", medium", "large"] ** | Physical Adversarial Attacks on Object Detection| Targeted patch | dev | (nb, 3, 1008, 756, 3) | uint8 | n/a | 2-tuple | 81 examples (3 images per example) | +| "dapricot_test_adversarial" | ["small", "medium", "large"] ** | Physical Adversarial Attacks on Object Detection| Targeted patch | test | (nb, 3, 1008, 756, 3) | uint8 | n/a | 2-tuple | 324 examples (3 images per example) | | "imagenet_adversarial" | "adversarial" | ILSVRC12 adversarial image dataset for ResNet50 | Targeted, universal perturbation | test | (nb, 224, 224, 3) |uint8 | (N,) | int64 | 1000 images | | "resisc45_adversarial_224x224" | "adversarial_univpatch" | REmote Sensing Image Scene Classification | Targeted, universal patch | test | (nb, 224, 224, 3) | uint8 | (N,) | int64 | 5 images/class | | "resisc45_adversarial_224x224" | "adversarial_univperturbation" | REmote Sensing Image Scene Classification | Untargeted, universal perturbation | test | (nb, 224, 224, 3) | uint8 | (N,) | int64 | 5 images/class | @@ -67,11 +68,12 @@ Note: the APRICOT dataset contains labels and bounding boxes for both COCO objects and physical adversarial patches. The label used to signify the patch is the `ADV_PATCH_MAGIC_NUMBER_LABEL_ID` defined in [armory/data/adversarial_datasets.py](../armory/data/adversarial_datasets.py). Each image contains one adversarial -patch and a varying number of COCO objects (in some cases zero). +patch and a varying number of COCO objects (in some cases zero).
COCO object class labels are one-indexed (start from 1) +in Armory <= 0.13.1 and zero-indexed in Armory > 0.13.1. The D-APRICOT dataset does NOT contain labels/bounding boxes for COCO objects, which may occasionally appear in the background (e.g. car). Each image contains one green screen intended for patch insertion. The green screen shapes vary -between diamond, rectangle, and octagon. Each example in the dataset consists of three images, each of a different camera +between diamond, rectangle, and octagon. A dataset example consists of three images, each of a different camera angle of the same scene and green screen. diff --git a/docs/datasets.md b/docs/datasets.md index 52813502a..a46242bc0 100644 --- a/docs/datasets.md +++ b/docs/datasets.md @@ -22,8 +22,10 @@ These tfrecord files will be pulled from S3 if not available on your | [imagenette](https://github.com/fastai/imagenette) | Smaller subset of 10 classes from Imagenet | (N, variable_height, variable_width, 3) | uint8 | (N,) | int64 | train, validation | | [mnist](http://yann.lecun.com/exdb/mnist/) | MNIST hand written digit image dataset | (N, 28, 28, 1) | float32 | (N,) | int64 | train, test | | [resisc45](https://arxiv.org/abs/1703.00121) | REmote Sensing Image Scene Classification | (N, 256, 256, 3) | float32 | (N,) | int64 | train, validation, test | +| [Coco2017](https://arxiv.org/abs/1405.0312) | Common Objects in Context | (N, variable_height, variable_width, 3) | float32 | n/a | dict | train, validation, test | | [xView](https://arxiv.org/pdf/1802.07856) | Objects in Context in Overhead Imagery | (N, variable_height, variable_width, 3) | float32 | n/a | dict | train, test | +NOTE: the Coco2017 dataset's class labels are 0-indexed (start from 0).
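The 0-indexing noted above comes from the coco_label_preprocessing function added to armory/data/datasets.py earlier in this diff. As a minimal sketch of its behavior (idx_map abbreviated here to a few entries; the full 80-entry map is in the datasets.py hunk above):

import numpy as np

# Abbreviated stand-in for the full 80-entry idx_map in datasets.py.
idx_map = {0: 0, 11: 12, 24: 26, 79: 89}
y = [{"label": np.array([0, 11, 24, 79]), "bbox": np.arange(16, dtype=np.float32)}]
for label_dict in y:
    # bbox is flattened in the TFDS record; reshape to one row per object.
    label_dict["boxes"] = label_dict.pop("bbox").reshape(-1, 4)
    # Map each contiguous 0-79 TFDS label into the sparse 0-89 COCO space.
    label_dict["labels"] = np.vectorize(idx_map.__getitem__)(label_dict.pop("label").reshape(-1,))
print(y[0]["labels"])  # [ 0 12 26 89]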
### Audio Datasets diff --git a/scenario_configs/apricot_frcnn.json b/scenario_configs/apricot_frcnn.json index 97a40c613..33ee992d4 100644 --- a/scenario_configs/apricot_frcnn.json +++ b/scenario_configs/apricot_frcnn.json @@ -44,7 +44,7 @@ "name": "ObjectDetectionTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/apricot_frcnn_defended.json b/scenario_configs/apricot_frcnn_defended.json index 7b2269a49..1ec0239e1 100644 --- a/scenario_configs/apricot_frcnn_defended.json +++ b/scenario_configs/apricot_frcnn_defended.json @@ -57,7 +57,7 @@ "name": "ObjectDetectionTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/asr_deepspeech_baseline.json b/scenario_configs/asr_deepspeech_baseline.json index 0b1aeaa16..a94ca0b51 100644 --- a/scenario_configs/asr_deepspeech_baseline.json +++ b/scenario_configs/asr_deepspeech_baseline.json @@ -76,7 +76,7 @@ "name": "AutomaticSpeechRecognition" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch-deepspeech:0.13.1", + "docker_image": "twosixarmory/pytorch-deepspeech:0.13.2", "external_github_repo": "hkakitani/deepspeech.pytorch", "gpus": "all", "local_repo_path": null, diff --git a/scenario_configs/asr_deepspeech_baseline_fgsm.json b/scenario_configs/asr_deepspeech_baseline_fgsm.json index 5ca632d52..1748b1d5c 100644 --- a/scenario_configs/asr_deepspeech_baseline_fgsm.json +++ b/scenario_configs/asr_deepspeech_baseline_fgsm.json @@ -68,7 +68,7 @@ "name": "AutomaticSpeechRecognition" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch-deepspeech:0.13.1", + "docker_image": "twosixarmory/pytorch-deepspeech:0.13.2", "external_github_repo": "hkakitani/deepspeech.pytorch", "gpus": "all", "local_repo_path": null, diff --git a/scenario_configs/asr_deepspeech_baseline_fgsm_channel.json b/scenario_configs/asr_deepspeech_baseline_fgsm_channel.json index 481a31bdc..897fbb7ba 100644 --- a/scenario_configs/asr_deepspeech_baseline_fgsm_channel.json +++ b/scenario_configs/asr_deepspeech_baseline_fgsm_channel.json @@ -73,7 +73,7 @@ "name": "AutomaticSpeechRecognition" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch-deepspeech:0.13.1", + "docker_image": "twosixarmory/pytorch-deepspeech:0.13.2", "external_github_repo": "hkakitani/deepspeech.pytorch", "gpus": "all", "local_repo_path": null, diff --git a/scenario_configs/asr_deepspeech_baseline_kenansville.json b/scenario_configs/asr_deepspeech_baseline_kenansville.json index 1b6eec608..b29f8c3dd 100644 --- a/scenario_configs/asr_deepspeech_baseline_kenansville.json +++ b/scenario_configs/asr_deepspeech_baseline_kenansville.json @@ -53,7 +53,7 @@ "name": "AutomaticSpeechRecognition" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch-deepspeech:0.13.1", + "docker_image": "twosixarmory/pytorch-deepspeech:0.13.2", "external_github_repo": "hkakitani/deepspeech.pytorch", "gpus": "all", "local_repo_path": null, diff --git a/scenario_configs/asr_deepspeech_defended_baseline_kenansville.json b/scenario_configs/asr_deepspeech_defended_baseline_kenansville.json index cd12f0553..b13a56d8b 100644 --- a/scenario_configs/asr_deepspeech_defended_baseline_kenansville.json +++ b/scenario_configs/asr_deepspeech_defended_baseline_kenansville.json @@ -64,7 +64,7 @@ "name": "AutomaticSpeechRecognition" }, 
"sysconfig": { - "docker_image": "twosixarmory/pytorch-deepspeech:0.13.1", + "docker_image": "twosixarmory/pytorch-deepspeech:0.13.2", "external_github_repo": "hkakitani/deepspeech.pytorch", "gpus": "all", "local_repo_path": null, diff --git a/scenario_configs/asr_deepspeech_snr.json b/scenario_configs/asr_deepspeech_snr.json index 984f4f45d..d6946a046 100644 --- a/scenario_configs/asr_deepspeech_snr.json +++ b/scenario_configs/asr_deepspeech_snr.json @@ -69,7 +69,7 @@ "name": "AutomaticSpeechRecognition" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch-deepspeech:0.13.1", + "docker_image": "twosixarmory/pytorch-deepspeech:0.13.2", "external_github_repo": "hkakitani/deepspeech.pytorch", "gpus": "all", "local_repo_path": null, diff --git a/scenario_configs/cifar10_baseline.json b/scenario_configs/cifar10_baseline.json index b6fd7ec27..4ae14f068 100644 --- a/scenario_configs/cifar10_baseline.json +++ b/scenario_configs/cifar10_baseline.json @@ -49,7 +49,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/cifar10_defended_example.json b/scenario_configs/cifar10_defended_example.json index f89e7db6e..1c19a3def 100644 --- a/scenario_configs/cifar10_defended_example.json +++ b/scenario_configs/cifar10_defended_example.json @@ -57,7 +57,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/dapricot_frcnn_masked_pgd.json b/scenario_configs/dapricot_frcnn_masked_pgd.json index 65324e05d..d87d21485 100644 --- a/scenario_configs/dapricot_frcnn_masked_pgd.json +++ b/scenario_configs/dapricot_frcnn_masked_pgd.json @@ -25,7 +25,7 @@ "eval_split": "large+medium+small", "framework": "numpy", "module": "armory.data.adversarial_datasets", - "name": "dapricot_dev_adversarial" + "name": "dapricot_test_adversarial" }, "defense": null, "metric": { @@ -53,7 +53,7 @@ "name": "ObjectDetectionTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": "colour-science/colour", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/gtsrb_scenario_baseline.json b/scenario_configs/gtsrb_scenario_baseline.json index 2bb55f2b0..d7b7e108e 100644 --- a/scenario_configs/gtsrb_scenario_baseline.json +++ b/scenario_configs/gtsrb_scenario_baseline.json @@ -58,7 +58,7 @@ "name": "GTSRB" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/gtsrb_scenario_baseline_pytorch.json b/scenario_configs/gtsrb_scenario_baseline_pytorch.json index 8ab3d2ba5..e950d4dfc 100644 --- a/scenario_configs/gtsrb_scenario_baseline_pytorch.json +++ b/scenario_configs/gtsrb_scenario_baseline_pytorch.json @@ -58,7 +58,7 @@ "name": "GTSRB" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/gtsrb_scenario_clbd.json b/scenario_configs/gtsrb_scenario_clbd.json index e7852a590..abbcf6fa1 100644 --- a/scenario_configs/gtsrb_scenario_clbd.json +++ 
b/scenario_configs/gtsrb_scenario_clbd.json @@ -58,7 +58,7 @@ "name": "GTSRB_CLBD" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/gtsrb_scenario_clbd_bullethole.json b/scenario_configs/gtsrb_scenario_clbd_bullethole.json index eb967b972..8bd288ffd 100644 --- a/scenario_configs/gtsrb_scenario_clbd_bullethole.json +++ b/scenario_configs/gtsrb_scenario_clbd_bullethole.json @@ -64,7 +64,7 @@ "name": "GTSRB_CLBD" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/gtsrb_scenario_clbd_defended.json b/scenario_configs/gtsrb_scenario_clbd_defended.json index 6953ea367..a85ad295d 100644 --- a/scenario_configs/gtsrb_scenario_clbd_defended.json +++ b/scenario_configs/gtsrb_scenario_clbd_defended.json @@ -69,7 +69,7 @@ "name": "GTSRB_CLBD" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/gtsrb_scenario_poison.json b/scenario_configs/gtsrb_scenario_poison.json index 7d587298e..aa507cbdc 100644 --- a/scenario_configs/gtsrb_scenario_poison.json +++ b/scenario_configs/gtsrb_scenario_poison.json @@ -48,7 +48,7 @@ "name": "GTSRB" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "use_gpu": false diff --git a/scenario_configs/librispeech_baseline_sincnet.json b/scenario_configs/librispeech_baseline_sincnet.json index 5144871b6..2fc552a88 100644 --- a/scenario_configs/librispeech_baseline_sincnet.json +++ b/scenario_configs/librispeech_baseline_sincnet.json @@ -55,7 +55,7 @@ "name": "AudioClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "hkakitani/SincNet", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/librispeech_baseline_sincnet_snr_pgd.json b/scenario_configs/librispeech_baseline_sincnet_snr_pgd.json index 44b70427e..5a60614c6 100644 --- a/scenario_configs/librispeech_baseline_sincnet_snr_pgd.json +++ b/scenario_configs/librispeech_baseline_sincnet_snr_pgd.json @@ -59,7 +59,7 @@ "name": "AudioClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "hkakitani/SincNet", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/librispeech_baseline_sincnet_targeted.json b/scenario_configs/librispeech_baseline_sincnet_targeted.json index c0881597a..87c1fdaa7 100644 --- a/scenario_configs/librispeech_baseline_sincnet_targeted.json +++ b/scenario_configs/librispeech_baseline_sincnet_targeted.json @@ -62,7 +62,7 @@ "name": "AudioClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "hkakitani/SincNet", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/mnist_baseline.json b/scenario_configs/mnist_baseline.json index 5b51d2103..e5238e62d 100644 --- a/scenario_configs/mnist_baseline.json +++ b/scenario_configs/mnist_baseline.json @@ -47,7 +47,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - 
"docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/resisc10_poison_dlbd.json b/scenario_configs/resisc10_poison_dlbd.json index 784c07b6a..40789b595 100644 --- a/scenario_configs/resisc10_poison_dlbd.json +++ b/scenario_configs/resisc10_poison_dlbd.json @@ -114,7 +114,7 @@ "name": "RESISC10" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/resisc45_baseline_densenet121.json b/scenario_configs/resisc45_baseline_densenet121.json index 47c78b946..635f7b2a0 100644 --- a/scenario_configs/resisc45_baseline_densenet121.json +++ b/scenario_configs/resisc45_baseline_densenet121.json @@ -45,7 +45,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/resisc45_baseline_densenet121_cascade.json b/scenario_configs/resisc45_baseline_densenet121_cascade.json index a24b5778c..31b570ec3 100644 --- a/scenario_configs/resisc45_baseline_densenet121_cascade.json +++ b/scenario_configs/resisc45_baseline_densenet121_cascade.json @@ -70,7 +70,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/resisc45_baseline_densenet121_finetune.json b/scenario_configs/resisc45_baseline_densenet121_finetune.json index 7d4ae54dd..d8548a26e 100644 --- a/scenario_configs/resisc45_baseline_densenet121_finetune.json +++ b/scenario_configs/resisc45_baseline_densenet121_finetune.json @@ -45,7 +45,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/resisc45_baseline_densenet121_patch.json b/scenario_configs/resisc45_baseline_densenet121_patch.json index a3a7278d2..d8332f41a 100644 --- a/scenario_configs/resisc45_baseline_densenet121_patch.json +++ b/scenario_configs/resisc45_baseline_densenet121_patch.json @@ -58,7 +58,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/resisc45_baseline_densenet121_targeted.json b/scenario_configs/resisc45_baseline_densenet121_targeted.json index 6261ddbe9..177b3285f 100644 --- a/scenario_configs/resisc45_baseline_densenet121_targeted.json +++ b/scenario_configs/resisc45_baseline_densenet121_targeted.json @@ -52,7 +52,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/so2sat_baseline.json b/scenario_configs/so2sat_baseline.json index 41a5aee5d..9adf197f0 100644 --- a/scenario_configs/so2sat_baseline.json +++ b/scenario_configs/so2sat_baseline.json @@ -92,7 +92,7 @@ "name": "So2SatClassification" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": 
"twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/so2sat_defended_baseline.json b/scenario_configs/so2sat_defended_baseline.json index edc323c0a..12d042084 100644 --- a/scenario_configs/so2sat_defended_baseline.json +++ b/scenario_configs/so2sat_defended_baseline.json @@ -138,7 +138,7 @@ "name": "So2SatClassification" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/ucf101_baseline_finetune.json b/scenario_configs/ucf101_baseline_finetune.json index 2e38d2ea2..15fbc87a7 100644 --- a/scenario_configs/ucf101_baseline_finetune.json +++ b/scenario_configs/ucf101_baseline_finetune.json @@ -52,7 +52,7 @@ "name": "Ucf101" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "yusong-tan/MARS", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/ucf101_baseline_pretrained.json b/scenario_configs/ucf101_baseline_pretrained.json index 8dd5e78b3..9e0adcdac 100644 --- a/scenario_configs/ucf101_baseline_pretrained.json +++ b/scenario_configs/ucf101_baseline_pretrained.json @@ -64,7 +64,7 @@ "name": "Ucf101" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "yusong-tan/MARS", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/ucf101_baseline_pretrained_frame_saliency.json b/scenario_configs/ucf101_baseline_pretrained_frame_saliency.json index 87144df03..271ef56ce 100644 --- a/scenario_configs/ucf101_baseline_pretrained_frame_saliency.json +++ b/scenario_configs/ucf101_baseline_pretrained_frame_saliency.json @@ -55,7 +55,7 @@ "name": "Ucf101" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "yusong-tan/MARS", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/ucf101_baseline_pretrained_pgd_patch.json b/scenario_configs/ucf101_baseline_pretrained_pgd_patch.json index a64898f64..60eb3958a 100644 --- a/scenario_configs/ucf101_baseline_pretrained_pgd_patch.json +++ b/scenario_configs/ucf101_baseline_pretrained_pgd_patch.json @@ -63,7 +63,7 @@ "name": "Ucf101" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "yusong-tan/MARS", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/ucf101_baseline_pretrained_targeted.json b/scenario_configs/ucf101_baseline_pretrained_targeted.json index d06e6d530..68d76cbdb 100644 --- a/scenario_configs/ucf101_baseline_pretrained_targeted.json +++ b/scenario_configs/ucf101_baseline_pretrained_targeted.json @@ -58,7 +58,7 @@ "name": "Ucf101" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": "yusong-tan/MARS", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/ucf101_defended_baseline_pretrained.json b/scenario_configs/ucf101_defended_baseline_pretrained.json index fddb88726..a5b2b6c16 100644 --- a/scenario_configs/ucf101_defended_baseline_pretrained.json +++ b/scenario_configs/ucf101_defended_baseline_pretrained.json @@ -76,7 +76,7 @@ "name": "Ucf101" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": 
"twosixarmory/pytorch:0.13.2", "external_github_repo": "yusong-tan/MARS", "gpus": "all", "output_dir": null, diff --git a/scenario_configs/xview_frcnn.json b/scenario_configs/xview_frcnn.json index b91b76ec9..7f915ac4c 100644 --- a/scenario_configs/xview_frcnn.json +++ b/scenario_configs/xview_frcnn.json @@ -53,7 +53,7 @@ "name": "ObjectDetectionTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/xview_frcnn_defended.json b/scenario_configs/xview_frcnn_defended.json index 332a3b406..0adbdcab5 100644 --- a/scenario_configs/xview_frcnn_defended.json +++ b/scenario_configs/xview_frcnn_defended.json @@ -66,7 +66,7 @@ "name": "ObjectDetectionTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/scenario_configs/xview_frcnn_targeted.json b/scenario_configs/xview_frcnn_targeted.json index 860bfae44..b5a8bc78a 100644 --- a/scenario_configs/xview_frcnn_targeted.json +++ b/scenario_configs/xview_frcnn_targeted.json @@ -60,7 +60,7 @@ "name": "ObjectDetectionTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/broken/invalid_dataset_framework.json b/tests/scenarios/broken/invalid_dataset_framework.json index 8d330db43..d94fc27f0 100644 --- a/tests/scenarios/broken/invalid_dataset_framework.json +++ b/tests/scenarios/broken/invalid_dataset_framework.json @@ -46,7 +46,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/broken/invalid_module.json b/tests/scenarios/broken/invalid_module.json index f44113f75..85babc436 100644 --- a/tests/scenarios/broken/invalid_module.json +++ b/tests/scenarios/broken/invalid_module.json @@ -39,7 +39,7 @@ "name": "fgm_attack" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "", "output_dir": null, diff --git a/tests/scenarios/broken/missing_scenario.json b/tests/scenarios/broken/missing_scenario.json index ffbffe8b5..ead10a2e9 100644 --- a/tests/scenarios/broken/missing_scenario.json +++ b/tests/scenarios/broken/missing_scenario.json @@ -40,7 +40,7 @@ "wrapper_kwargs": {} }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/pytorch/image_classification.json b/tests/scenarios/pytorch/image_classification.json index cc1d71f6d..29ee701e9 100644 --- a/tests/scenarios/pytorch/image_classification.json +++ b/tests/scenarios/pytorch/image_classification.json @@ -46,7 +46,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/pytorch/image_classification_pretrained.json b/tests/scenarios/pytorch/image_classification_pretrained.json index b7ac24090..7861df51d 100644 --- 
a/tests/scenarios/pytorch/image_classification_pretrained.json +++ b/tests/scenarios/pytorch/image_classification_pretrained.json @@ -44,7 +44,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/pytorch:0.13.1", + "docker_image": "twosixarmory/pytorch:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/tf1/image_classification.json b/tests/scenarios/tf1/image_classification.json index d7d8e2744..e50cb7068 100644 --- a/tests/scenarios/tf1/image_classification.json +++ b/tests/scenarios/tf1/image_classification.json @@ -46,7 +46,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/tf1/image_classification_pretrained.json b/tests/scenarios/tf1/image_classification_pretrained.json index 556f41c00..42298b084 100644 --- a/tests/scenarios/tf1/image_classification_pretrained.json +++ b/tests/scenarios/tf1/image_classification_pretrained.json @@ -44,7 +44,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/scenarios/tf1/image_classification_tfgraph.json b/tests/scenarios/tf1/image_classification_tfgraph.json index 64cf53ed7..00cbeecdd 100644 --- a/tests/scenarios/tf1/image_classification_tfgraph.json +++ b/tests/scenarios/tf1/image_classification_tfgraph.json @@ -46,7 +46,7 @@ "name": "ImageClassificationTask" }, "sysconfig": { - "docker_image": "twosixarmory/tf1:0.13.1", + "docker_image": "twosixarmory/tf1:0.13.2", "external_github_repo": null, "gpus": "all", "output_dir": null, diff --git a/tests/test_docker/test_dataset.py b/tests/test_docker/test_dataset.py index 976a3437f..73b9517f8 100644 --- a/tests/test_docker/test_dataset.py +++ b/tests/test_docker/test_dataset.py @@ -461,7 +461,28 @@ def test_resisc45_adversarial_224x224(): assert y.shape == (batch_size,) -def test_dapricot(): +def test_coco2017(): + if not os.path.exists(os.path.join(DATASET_DIR, "coco", "2017", "1.1.0")): + pytest.skip("coco2017 dataset not downloaded.") + + split_size = 5000 + split = "validation" + dataset = datasets.coco2017(split=split,) + assert dataset.size == split_size + + for i in range(8): + x, y = dataset.get_batch() + assert x.shape[0] == 1 + assert x.shape[-1] == 3 + assert isinstance(y, list) + assert len(y) == 1 + y_dict = y[0] + assert isinstance(y_dict, dict) + for obj_key in ["labels", "boxes", "area"]: + assert obj_key in y_dict + + +def test_dapricot_dev(): split_size = 27 split = "small" dataset = adversarial_datasets.dapricot_dev_adversarial(split=split,) @@ -482,6 +503,27 @@ def test_dapricot(): assert patch_metadata_key in y_patch_metadata[k] +def test_dapricot_test(): + split_size = 108 + split = "small" + dataset = adversarial_datasets.dapricot_test_adversarial(split=split,) + assert dataset.size == split_size + + x, y = dataset.get_batch() + for i in range(2): + assert x.shape == (1, 3, 1008, 756, 3) + assert isinstance(y, tuple) + assert len(y) == 2 + y_object, y_patch_metadata = y + assert len(y_object) == 3 # 3 images per example + for obj_key in ["labels", "boxes", "area"]: + for k in range(3): + assert obj_key in y_object[k] + for patch_metadata_key in ["cc_scene", "cc_ground_truth", "gs_coords", "shape"]: + for k in range(3): + assert 
patch_metadata_key in y_patch_metadata[k] + + def test_ucf101_adversarial_112x112(): if not os.path.isdir( os.path.join( diff --git a/tests/test_tf1/test_tf1_models.py b/tests/test_tf1/test_tf1_models.py index b2df48f00..733a6718e 100644 --- a/tests/test_tf1/test_tf1_models.py +++ b/tests/test_tf1/test_tf1_models.py @@ -39,6 +39,35 @@ def test_tf1_mnist(): assert (accuracy / test_dataset.batches_per_epoch) > 0.9 +@pytest.mark.usefixtures("ensure_armory_dirs") +def test_tf1_coco(): + if not os.path.exists(os.path.join(DATASET_DIR, "coco", "2017", "1.1.0")): + pytest.skip("coco2017 dataset not downloaded.") + + detector_module = import_module("armory.baseline_models.tf_graph.mscoco_frcnn") + detector_fn = getattr(detector_module, "get_art_model") + detector = detector_fn(model_kwargs={}, wrapper_kwargs={}) + + NUM_TEST_SAMPLES = 10 + dataset = datasets.coco2017(split="validation", shuffle_files=False) + + list_of_ys = [] + list_of_ypreds = [] + for _ in range(NUM_TEST_SAMPLES): + x, y = dataset.get_batch() + y_pred = detector.predict(x) + list_of_ys.extend(y) + list_of_ypreds.extend(y_pred) + + average_precision_by_class = object_detection_AP_per_class( + list_of_ys, list_of_ypreds + ) + mAP = np.fromiter(average_precision_by_class.values(), dtype=float).mean() + for class_id in [0, 2, 5, 9, 10]: + assert average_precision_by_class[class_id] > 0.6 + assert mAP > 0.1 + + @pytest.mark.usefixtures("ensure_armory_dirs") def test_tf1_apricot(): if not os.path.isdir(os.path.join(DATASET_DIR, "apricot_dev", "1.0.1")):
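The apricot_label_preprocessing update earlier in this diff relies on numpy boolean arithmetic: subtracting the comparison result shifts every ordinary COCO label down by 1 (making it 0-indexed) while leaving the adversarial-patch magic label untouched. A minimal sketch, using an illustrative (not authoritative) value for the magic number:

import numpy as np

# Illustrative value only; the real constant is defined in armory/data/adversarial_datasets.py.
ADV_PATCH_MAGIC_NUMBER_LABEL_ID = 12
y = [{"labels": np.array([1, ADV_PATCH_MAGIC_NUMBER_LABEL_ID, 27, 64])}]
for y_dict in y:
    # (labels != magic) evaluates to 1 for ordinary COCO objects and 0 for the
    # patch label, so only non-patch labels shift from 1-indexed to 0-indexed.
    y_dict["labels"] -= y_dict["labels"] != ADV_PATCH_MAGIC_NUMBER_LABEL_ID
print(y[0]["labels"])  # [ 0 12 26 63]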