Merge branch 'dev' of https://github.com/Project-AgML/AgML into dev
naitikjain3071 committed Oct 14, 2024
2 parents b1362e3 + 112ac38 commit 8a5efbc
Showing 4 changed files with 62 additions and 11 deletions.
24 changes: 24 additions & 0 deletions agml/_assets/public_datasources.json
@@ -415,6 +415,30 @@
},
"external_image_sources": []
},
"tomato_ripeness_detection": {
"ml_task": "object_detection",
"ag_task": "fruit_ripeness_detection",
"location": {
"continent": "worldwide",
"country": "worldwide"
},
"sensor_modality": "rgb",
"real_synthetic": "real",
"platform": "ground",
"input_data_format": "jpg",
"annotation_format": "coco_json",
"n_images": "804",
"docs_url": "https://www.kaggle.com/datasets/nexuswho/laboro-tomato",
"classes": {
"1": "b_fully_ripened",
"2": "b_half_ripened",
"3": "b_green",
"4": "l_fully_ripened",
"5": "l_half_ripened",
"6": "l_green"
},
"external_image_sources": []
},
"apple_flower_segmentation": {
"ml_task": "semantic_segmentation",
"ag_task": "flower_segmentation",
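Once registered here, the dataset should be loadable through AgML's standard public loader. A minimal usage sketch, assuming the documented AgMLDataLoader interface (len(), indexing, and num_classes):

    # Sketch: load the newly registered dataset through AgML's public loader.
    import agml

    loader = agml.data.AgMLDataLoader('tomato_ripeness_detection')
    print(len(loader))         # expected: 804 images, per the entry above
    print(loader.num_classes)  # expected: 6 ripeness classes

    # Samples are (image, annotation) pairs; annotations follow the COCO
    # format declared in the entry's "annotation_format" field.
    image, annotations = loader[0]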
4 changes: 4 additions & 0 deletions agml/_assets/source_citations.json
@@ -47,6 +47,10 @@
"license": "",
"citation": "@Misc{Koirala2019,\n author={Koirala, Anand and Walsh, Kerry and Wang, Z. and McCarthy, C.},\n title={MangoYOLO data set},\n year={2019},\n month={2021},\n day={10-19},\n publisher={Central Queensland University},\n keywords={Mango images; Fruit detection; Yield estimation; Mango; Agricultural Land Management; Horticultural Crop Growth and Development},\n abstract={Datasets and directories are structured similar to the PASCAL VOC dataset, avoiding the need to change scripts already available, with the detection frameworks ready to parse PASCAL VOC annotations into their format. The sub-directory JPEGImages consist of 1730 images (612x512 pixels) used for train, test and validation. Each image has at least one annotated fruit. The sub-directory Annotations consists of all the annotation files (record of bounding box coordinates for each image) in xml format and have the same name as the image name. The sub-directory Main consists of the text file that contains image names (without extension) used for train, test and validation. Training set (train.txt) lists 1300 train images Validation set (val.txt) lists 130 validation images Test set (test.txt) lists 300 test images Each image has an XML annotation file (filename = image name) and each image set (training validation and test set) has associated text files (train.txt, val.txt and test.txt) containing the list of image names to be used for training and testing. The XML annotation file contains the image attributes (name, width, height), the object attributes (class name, object bounding box co-ordinates (xmin, ymin, xmax, ymax)). (xmin, ymin) and (xmax, ymax) are the pixel co-ordinates of the bounding box's top-left corner and bottom-right corner respectively.},\n note={CC-BY-4.0},\n url={https://figshare.com/articles/dataset/MangoYOLO_data_set/13450661, https://researchdata.edu.au/mangoyolo-set},\n language={English}\n}"
},
"tomato_ripeness_detection": {
"license": "CC BY-NC-SA 4.0",
"citation": ""
},
"apple_flower_segmentation": {
"license": "US Public Domain",
"citation": "@ARTICLE{8392727,\n author={Dias, Philipe A. and Tabb, Amy and Medeiros, Henry},\n journal={IEEE Robotics and Automation Letters}, \n title={Multispecies Fruit Flower Detection Using a Refined Semantic Segmentation Network}, \n year={2018},\n volume={3},\n number={4},\n pages={3003-3010},\n doi={10.1109/LRA.2018.2849498}}"
3 changes: 3 additions & 0 deletions agml/_internal/preprocess.py
@@ -461,6 +461,9 @@ def mango_detection_australia(self, dataset_name):
            output_imgpath=output_img_path,
            extract_num_from_imgid=True
)

    def tomato_ripeness_detection(self, dataset_name):
        # Stub: no preprocessing is performed for this dataset (see the note below).
        pass

def cotton_seedling_counting(self, dataset_name):
# Get all of the relevant data
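The preprocessing hook above is left empty, presumably because the Laboro Tomato data already ships in the COCO JSON format that AgML expects for object detection. For reference, a hedged sketch of inspecting such a COCO annotation file using only the standard library (the 'annotations.json' filename is an assumption, not a path taken from this commit):

    # Sketch: inspect a COCO-format annotation file. The filename below is
    # hypothetical; substitute the dataset's actual annotation file.
    import json
    from collections import Counter

    with open('annotations.json') as f:
        coco = json.load(f)

    # Map category ids to the ripeness class names registered above.
    id_to_name = {c['id']: c['name'] for c in coco['categories']}

    # Count bounding boxes per ripeness class.
    counts = Counter(id_to_name[a['category_id']] for a in coco['annotations'])
    print(counts)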
42 changes: 31 additions & 11 deletions agml/viz/display.py
@@ -16,9 +16,34 @@

import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ExifTags

from agml.viz.tools import get_viz_backend

def correct_image_orientation(image):
"""Correct image orientation based on EXIF data."""
try:
pil_image = Image.fromarray(image)
        # Find the EXIF tag id that corresponds to 'Orientation'.
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
exif = pil_image._getexif()
if exif is not None:
orientation_value = exif.get(orientation, None)

# Rotate the image according to EXIF orientation
if orientation_value == 3:
pil_image = pil_image.rotate(180, expand=True)
elif orientation_value == 6:
pil_image = pil_image.rotate(270, expand=True)
elif orientation_value == 8:
pil_image = pil_image.rotate(90, expand=True)

# Convert back to numpy array for further use (e.g., with OpenCV or Matplotlib)
return np.array(pil_image)
    except Exception:
        # If anything goes wrong (e.g. no usable EXIF data), return the image as-is.
        return image
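One caveat: a NumPy array carries no EXIF metadata, so by the time an image has been decoded to an array, Image.fromarray usually yields a PIL image with no orientation tag and the function falls through to returning the input unchanged. When loading straight from disk, Pillow's built-in ImageOps.exif_transpose (Pillow >= 6.0) performs the same correction and also handles the mirrored orientation values (2, 4, 5, and 7) that the manual branches above skip. A sketch under that assumption:

    # Sketch: EXIF-correct an image at load time with Pillow's built-in
    # helper, which covers all eight EXIF orientation values.
    from PIL import Image, ImageOps
    import numpy as np

    def load_image_upright(path):
        """Open an image file and return an upright numpy array."""
        with Image.open(path) as pil_image:
            return np.array(ImageOps.exif_transpose(pil_image))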

def display_image(image, **kwargs):
"""Displays an image using the appropriate backend."""
@@ -30,6 +55,9 @@ def display_image(image, **kwargs):
except NameError:
pass

# Correct the orientation before displaying
image = correct_image_orientation(image)

if get_viz_backend() == 'cv2':
# If running in Colab, then use a separate procedure.
if 'google.colab' in sys.modules:
@@ -45,29 +73,21 @@
        # Jupyter notebooks automatically display images in this case,
        # so there is nothing more to do here.
if notebook:
# If the input content is not a figure, then we can display it.
if kwargs.get('matplotlib_figure', True):
return

else:
if kwargs.get('read_raw', False):
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # convert back to BGR
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyWindow('image')
return

if get_viz_backend() == 'matplotlib':
        # If we are in a notebook or Colab, don't show anything, since
        # notebooks render Matplotlib figures automatically.
        #
        # However, some methods produce no Matplotlib output of their own,
        # so 'force_show' bypasses this skip to display the result anyway.
        if ('google.colab' in sys.modules or notebook) and not kwargs.get('force_show', False):
return

# Default case is matplotlib, since it is the most modular.
        fig = plt.figure(figsize=(10, 10))
plt.imshow(image)
plt.gca().axis('off')
plt.gca().set_aspect('equal')
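A short usage sketch for the updated helper (display_image is the function defined in this file; the random array merely stands in for a real image):

    # Sketch: display an array through the backend-aware helper. The
    # orientation correction added above runs before the image is shown.
    import numpy as np
    from agml.viz.display import display_image

    img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
    display_image(img)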
