diff --git a/build/lib/pcv/__init__.py b/build/lib/pcv/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/build/lib/pcv/interact.py b/build/lib/pcv/interact.py
new file mode 100644
index 0000000..d812287
--- /dev/null
+++ b/build/lib/pcv/interact.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+import cv2
+
+
+waitKey = lambda ms : cv2.waitKey(ms) & 0xFF
+
+
+class DoNothing:
+    ''' A context manager that does nothing. '''
+    def __init__(self): pass
+    def __enter__(self): return self
+    def __exit__(self, *args): pass
+
+
+class MouseCallback:
+    ''' A context manager for temporary mouse callbacks. '''
+    def __init__(self, window, handler, param=None,
+                 restore=lambda *args: None, restore_param=None):
+        ''' Initialise with the window, handler, and restoration command.
+
+        'window' is the name of the window to set the callback for.
+        'handler' is the function for handling the callback. OpenCV calls it
+            with the event type, x, y, flags ('&'ed EVENT_FLAG bits), and the
+            optional param passed in on registration.
+        'param' is any Python object that should get passed to the handler
+            on each call - useful for tracking state.
+        'restore' is the function to restore as the handler on context exit.
+        'restore_param' is the handler param to restore on context exit.
+
+        '''
+        self.window = window
+        self.handler = handler
+        self.param = param
+        self.restore = restore
+        self.restore_param = restore_param
+
+    def __enter__(self):
+        cv2.setMouseCallback(self.window, self.handler, self.param)
+        return self
+
+    def __exit__(self, *args):
+        cv2.setMouseCallback(self.window, self.restore, self.restore_param)
diff --git a/build/lib/pcv/process.py b/build/lib/pcv/process.py
new file mode 100644
index 0000000..0c6e1c2
--- /dev/null
+++ b/build/lib/pcv/process.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+import cv2
+
+def downsize(img, ratio):
+    ''' downsize 'img' by 'ratio'. '''
+    return cv2.resize(img,
+                      tuple(dim // ratio for dim in reversed(img.shape[:2])),
+                      interpolation = cv2.INTER_AREA)
+
+def channel_options(img):
+    ''' Create a composite image of img in all of opencv's colour channels
+
+    |img| -> | blue | green | red |
+             | hue | saturation | value |
+             | hue2 | luminosity | saturation2 |
+             | lightness | green-red | blue-yellow |
+             | lightness2 | u | v |
+
+    '''
+    B,G,R = cv2.split(img)
+    H,S,V = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
+    H2,L2,S2 = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HLS))
+    L,a,b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2LAB))
+    L3,u,v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2LUV))
+    channels = (((B, 'blue'), (G, 'green'), (R, 'red')),
+                ((H, 'hue'), (S, 'saturation'), (V, 'value')),
+                ((H2, 'hue2'), (L2, 'luminosity'), (S2, 'saturation2')),
+                ((L, 'lightness'), (a, 'green-red'), (b, 'blue-yellow')),
+                ((L3,'lightness2'), (u, 'u'), (v, 'v')))
+    out = []
+    for row in channels:
+        img_row = []
+        for channel, name in row:
+            cv2.putText(channel, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
+                        0.6, 255, 1)
+            img_row.append(channel)
+        out.append(cv2.hconcat(img_row))
+    return cv2.vconcat(out)
diff --git a/build/lib/pcv/something_fishy.py b/build/lib/pcv/something_fishy.py
new file mode 100644
index 0000000..4725411
--- /dev/null
+++ b/build/lib/pcv/something_fishy.py
@@ -0,0 +1,539 @@
+#!/usr/bin/env python3
+
+import numpy as np
+import cv2
+from os import mkdir
+from os.path import isfile, isdir
+from pcv.vidIO import LockedCamera
+from pcv.interact import MouseCallback
+
+
+class Fish:
+    ''' A fish that floats, and can be tickled and/or caught.
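+
+    Illustrative construction (sketch only - these argument values are
+    arbitrary, not defaults of this module):
+
+        fish = Fish('nemo', position=complex(360, 640),
+                    velocity=complex(2, -3), tank_dims=(720, 1280),
+                    depth=10)
+        fish.draw(frame, acceleration)  # BGR image + (x, y) accel maps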
''' + def __init__(self, name, position, velocity, tank_dims, depth): + ''' Create a fish with specified attributes. + + 'name' should be a string, and determines the internal properties + of the fish, such as size and colour. + 'position' and 'velocity' should be complex numbers with the real + axis pointing down the image, and the imaginary axis pointing + across to the right. The origin is the top left corner. + This helps facilitate easy flipping at the sides (complex + conjugate), and rotation at the top/bottom. + 'tank_dims' is a tuple of the dimensions of the tank, as + (height, width) coordinates. + 'depth' is this fish's location relative to the viewer/other fish. + It should generally be between 1 and 50, and will likely raise + an Exception if below -5 or above 95 (it is used to determine + water cover). + + ''' + self.name = name + self.position = position + self.velocity = velocity + self.x_min = self.y_min = 0 + self.y_max, self.x_max = tank_dims + self.depth = depth + self.is_caught = 0 + self._changed = [False, False] + self.determine_appearance() + self.update_angle() + + @property + def position_tuple(self): + ''' The x, y position coordinates. ''' + return self.position.imag, self.position.real + + @property + def velocity_tuple(self): + ''' The x, y velocity coordinates. ''' + return self.velocity.imag, self.velocity.real + + @property + def bbox(self): + ''' A rough bounding box of the fish. ''' + long = self.axes[0] + return np.array(list(self.position_tuple)*2).reshape(2,2) \ + - np.array([[long], [-long]]) + + def determine_appearance(self): + ''' Use self.name to determine size and colour. ''' + # convert name to (0,1] numbers, use only lowercase for good range + min_char, max_char = ord('a')-1, ord('z') + range_ = max_char - min_char + numbers = [(ord(c.lower())-min_char)/range_ + for c in self.name if c not in " -'"] + self.colours = np.array([255*np.array([numbers[i], numbers[-i%3]]) + for i in range(-3,0)]) + # add blue for depth (at least 5% behind water) + alpha = self.depth / 100 + 0.05 + self.colours = self.add_water(self.colours, alpha).T + + # determine size and shape + self.size = 3*(sum(numbers) + len(numbers)) / (sum(numbers[:2])/2) + self.axes = (self.size / (numbers[1]+2), + self.size / (numbers[2]+3.2)) + + # eye properties + self._eye_offset = np.array([3*self.axes[0]/5, self.axes[1]/4]) + self._eye_size = int(self.axes[0] / 7) + self._pupil_size = int(self.axes[1] / 8) + + # fin properties + self._fin_points = np.array([[0,0],[-1,numbers[2]],[-1,-numbers[1]]]) \ + * self.axes[1] + + # tail properties + self._tail_points = np.array( + [[-self._eye_offset[0], 0], + -self._eye_offset * [2, 3], + self._eye_offset * [-(1.2 + 2 * numbers[0] / 3), 1]] * 2) + self._tail_points = [self._tail_points[:3], self._tail_points[3:]] + self._tail_points[1][1,1] *= -1 + self._tail_points[1][2,1] *= -1 + + @staticmethod + def add_water(colour, alpha): + ''' Add (100*alpha)% water cover to the specified colour. ''' + beta = 1 - alpha + colour *= beta + colour[0] += 200 * alpha + colour[1] += 40 * alpha + return colour + + def update_angle(self): + ''' Compute the updated angle and rotation based on the velocity. ''' + angle = np.arctan2(*self.velocity_tuple[::-1]) + self._rotate_fin = np.array([np.cos(angle), np.sin(angle)]) + self._rotate = np.array([self._rotate_fin, + (self._rotate_fin * [1,-1])[::-1]]) + self.angle = np.degrees(angle) + + def draw(self, img, acceleration): + ''' Draw self and update state according to acceleration. 
+
+        If self.is_caught, draws a fading ellipse for 50 frames before
+        skipping drawing altogether.
+
+        'img' is the image to draw onto, and should be an 8-bit colour image.
+        'acceleration' should be a tuple of two single-channel maps (the x
+            and y components) with the same width and height as 'img',
+            mapping position to acceleration.
+
+        '''
+        if self.is_caught < 50:
+            colour = self.colours[0].copy()
+            thickness = -1
+            if self.is_caught:
+                alpha = self.is_caught / 50
+                colour = tuple(self.add_water(colour, alpha))
+                self.is_caught += 1
+                thickness = 1
+
+            cv2.ellipse(img, (self.position_tuple,
+                              tuple(2*dim for dim in self.axes),
+                              self.angle), colour, thickness)
+
+            if not self.is_caught:
+                for draw_func in (self.draw_eye, self.draw_tail, self.draw_fin):
+                    draw_func(img)
+
+                self.update_state(acceleration)
+
+    def draw_eye(self, img):
+        ''' Draw eye on 'img'. '''
+        eye_offset = self._eye_offset * [1, -np.sign(self.velocity.imag)]
+        eye_offset = eye_offset @ self._rotate
+        pupil_offset = eye_offset * 1.05
+        eye_pos = tuple(int(dim) for dim in eye_offset + self.position_tuple)
+        pupil_pos = tuple(int(dim) for dim in
+                          pupil_offset + self.position_tuple)
+        for pos, size, colour in [[eye_pos, self._eye_size, (244,212,204)],
+                                  [pupil_pos, self._pupil_size, (40,8,0)]]:
+            cv2.circle(img, pos, size, colour, -1)
+
+    def draw_tail(self, img):
+        ''' Draw tail on 'img'. '''
+        colour = tuple(int(channel) for channel in self.colours[1])
+        for half in self._tail_points:
+            half_points = half @ self._rotate + self.position_tuple
+            cv2.fillConvexPoly(img, np.int32(half_points), colour)
+
+    def draw_fin(self, img):
+        ''' Draw fin on 'img'. '''
+        colour = tuple(int(channel) for channel in self.colours[1])
+        fin_points = self._rotate_fin * self._fin_points + self.position_tuple
+        cv2.fillConvexPoly(img, np.int32(fin_points), colour)
+
+    def update_state(self, acceleration):
+        ''' Update the fish position/velocity. '''
+        # update position
+        self.position += self.velocity
+
+        # update velocity
+        self.velocity *= 0.995 # add a touch of damping to avoid crazy speeds
+
+        x,y = np.int32(np.round(self.position_tuple))
+        x_accel, y_accel = acceleration
+        try:
+            self.velocity += (x_accel[y,x] + 1j * y_accel[y,x])
+        except IndexError:
+            pass # outside the tank
+
+        # update relevant velocity component if outside the tank
+        for index, (min_, max_) in enumerate([[self.x_min, self.x_max],
+                                              [self.y_min, self.y_max]]):
+            val = self.position_tuple[index]
+            left = val < min_
+            if (left or val > max_):
+                if not self._changed[index]:
+                    if index == 0: # mirror if hitting side
+                        self.velocity = self.velocity.conjugate()
+                    else: # rotate 90 degrees if hitting top/bottom
+                        direc = -2 * (left - 0.5)
+                        self.velocity *= \
+                            direc * np.sign(self.velocity.imag) * 1j
+                    self._changed[index] = True
+            elif self._changed[index]:
+                # back in the tank
+                self._changed[index] = False
+
+        self.update_angle()
+
+    def catch(self):
+        ''' Mark this fish as caught. '''
+        self.is_caught = 1
+        print(f'Caught {self}!')
+
+    def __str__(self):
+        return f'{self.name}: {self.size/100:.3f}kg'
+
+
+class FishTank:
+    ''' A tank for storing and tracking fish. '''
+    def __init__(self, tank_dims, max_fish=40, name_file='names.txt'):
+        ''' Create a new fish tank as specified.
+
+        'tank_dims' should be a tuple of (height, width) pixels.
+        'max_fish' is the largest number of fish that can be generated on
+            initialisation (a random number between 2 and max_fish - 1 is
+            used). Defaults to 40.
+        'name_file' is the filename of a newline-separated file containing
+            possible names for the fish in the tank. Defaults to 'names.txt',
+            which on distribution contains a few hundred popular names from
+            around the world. Feel free to change it to a set of names of your
+            family and friends!
+
+        '''
+        # store/initialise input parameters
+        self.dims = np.int32(tank_dims)
+        self.max_fish = max_fish
+        with open(name_file) as names:
+            self.names = [name.strip() for name in names]
+
+        self._initialise_fish()
+        self._initialise_stats()
+        self._initialise_instructions()
+        self._setup_mouse_control()
+
+    def _initialise_fish(self):
+        ''' Initialise fish, including storage and trackers. '''
+        # create a fast random number generator
+        self.rng = np.random.default_rng()
+
+        self.caught_fish = []
+        self.fish = []
+        # rng.integers excludes its upper bound, so this gives between
+        # 2 and max_fish - 1 fish
+        self._num_fish = self.rng.integers(2, self.max_fish)
+        for i in range(self._num_fish):
+            self.fish.append(self.random_fish(i))
+
+    def random_fish(self, depth):
+        ''' Create a random fish instance. '''
+        return Fish(self.random_name(), self.random_position(),
+                    self.random_velocity(), self.dims, depth)
+
+    def random_name(self):
+        ''' Randomly choose a name for a fish. '''
+        return self.names[self.rng.integers(len(self.names))]
+
+    def random_position(self):
+        ''' Determine a valid random position for a fish. '''
+        offset = 40
+        return complex(self.rng.integers(offset, self.dims[0]-offset),
+                       self.rng.integers(offset, self.dims[1]-offset))
+
+    def random_velocity(self):
+        ''' Create a random initial velocity for a fish. '''
+        max_x = self.dims[1] // 100
+        max_y = self.dims[0] // 100
+        return complex(self.rng.integers(-max_y, max_y),
+                       self.rng.integers(-max_x, max_x))
+
+    def _initialise_stats(self):
+        ''' Initialise stats and tracking parameters. '''
+        self._prev = np.zeros(tuple(self.dims), dtype=np.int32)
+        self._precision = 0
+        self._gradient = False
+        self._attempts = 0
+        self._t = 1
+
+    def _initialise_instructions(self):
+        ''' Create some helpful instructions to display at the start.
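+
+        Layout sketch (derived from the code below, not configurable
+        constants): with 16 instruction lines, each line is centred
+        horizontally from its cv2.getTextSize width, successive lines
+        advance by roughly one text height plus spacing, and the whole
+        block is centred with y_offset = (height - tot_y) // 2.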
''' + self._instructions_visible = True + + scale = 0.6 + thickness = 1 + height, width = self.dims + font = cv2.FONT_HERSHEY_SIMPLEX + self._instructions = np.zeros(tuple((height, width, 3)), dtype=np.uint8) + + instructions = ( + "Who lives in a pineapple with OpenCV?", + '', + "Press 'i' to toggle instructions on/off", + "Press 'g' to toggle the image gradient used for acceleration", + "Press 'q' to quit, and SPACE to pause/resume", + '', + "Catch fish by dragging your 'net' over them with the mouse", + "(if your box is too big or small they'll escape).", + "Caught fish will have their image, with name and size", + "displayed in the 'gallery' folder.", + '', + "'Hit rate' is the percentage of attempts you've caught a fish in.", + "'Avg size ratio' is the ratio of your box size over the fish size", + "for each of your successful catches - smaller is more skillful.", + '', + "Some fish might escape the tank and not come back, and that's ok.", + ) + + # add instructions to an empty image, for merging later + num_instructions = len(instructions) + text_height = cv2.getTextSize(' ', font, scale, thickness)[0][1] + spacing = 2 * text_height / 3 + tot_y = num_instructions * text_height + spacing * (num_instructions - 2) + + y_offset = (height - tot_y) // 2 + + for index, line in enumerate(instructions): + x,y = cv2.getTextSize(line, font, scale, thickness)[0] + x_pos = int((width - x) / 2) + y_pos = int(y_offset + (y + spacing) * index) + cv2.putText(self._instructions, line, (x_pos, y_pos), font, + scale, (255,255,255), thickness) + + + def _setup_mouse_control(self): + ''' Specify mouse control functions. ''' + self._start_point = self._end_point = None + self._catch_bindings = { + cv2.EVENT_LBUTTONDOWN : self.start_catching, + cv2.EVENT_MOUSEMOVE : self.catch_to, + cv2.EVENT_LBUTTONUP : self.end_catch, + } + + def mouse_handler(self, event, *args): + self._catch_bindings.get(event, lambda *args: None)(*args) + + def start_catching(self, x, y, *args): + ''' Start the creation of a net for catching fish. ''' + self._start_point = self._end_point = x,y + + def catch_to(self, x, y, *args): + ''' Draw the net live as it resizes. ''' + if self._start_point: + self._end_point = x,y + + def end_catch(self, x, y, *args): + ''' Register a catch attempt and check for catches. ''' + self._catch_img = self._img + self._catch_fish() + self._start_point = self._end_point = None + self._attempts += 1 + + def _catch_fish(self): + ''' Check if any fish were caught in the last attempt. ''' + # get current fish bounding boxes + try: + min_pts, max_pts = self._fish_bboxes + except ValueError: + return # no more fish to catch + min_pts = min_pts.reshape(-1,2) + max_pts = max_pts.reshape(-1,2) + + min_pt, max_pt = self._get_net_extent() + caught = self._find_caught_fish(min_pts, max_pts, + min_pt, max_pt) + self._register_catches(min_pt, max_pt, caught) + + def _get_net_extent(self): + ''' Returns the min_pt, max_pt of the net extent. ''' + pts = [] + for i in range(2): + p1 = self._start_point[i] + p2 = self._end_point[i] + if p1 < p2: + pts.append([p1,p2]) + else: + pts.append([p2,p1]) + return np.array(pts).T + + def _find_caught_fish(self, min_pts, max_pts, min_pt, max_pt): + ''' Returns an index array of caught fish. 
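+
+        Worked example (made-up numbers): a fish with bbox corners
+        (100, 80) and (140, 120) is caught by a net from (90, 70) to
+        (150, 130): the fish lies strictly inside the net, and the net's
+        width + height sum (60 + 60 = 120) is under 4x the fish's
+        (40 + 40 = 80), so size_ratio = 1.5 passes the < 4 check.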
''' + min_diff = min_pts - min_pt + max_diff = max_pt - max_pts + box_size = (max_pt - min_pt).sum() + size_ratio = box_size / (max_pts - min_pts).sum(axis=1) + caught, = np.nonzero((size_ratio < 4) & + (((min_diff > 0) & (max_diff > 0)).sum(axis=1) == 2)) + self._precision += size_ratio[caught].sum() + return caught + + def _register_catches(self, min_pt, max_pt, caught): + ''' Register catches and track which fish are free. ''' + free_fish = [] + caught_fish = [] + for index, fish in enumerate(self.fish): + if index in caught: + caught_fish.append(fish) + fish.catch() + else: + free_fish.append(fish) + + # save image for caught fish + if len(caught): + # create the gallery if it doesn't already exist + if not isdir('gallery'): + mkdir('gallery') + + # determine relevant image filename + fish = '-'.join(f'{fish.name}_{fish.size/100:.3f}kg' + for fish in caught_fish) + pre, extension = 'gallery/caught_', '.png' + filename = pre + fish + extension + + # put a count at the end if the fish has already been caught + count = 0 + while isfile(filename): + count += 1 + filename = f'{pre}{fish}({count}){extension}' + + # ensure image is within frame + min_pt[min_pt < 0] = 0 + max_pt[0] = min(max_pt[0], self.dims[1]) + max_pt[1] = min(max_pt[1], self.dims[0]) + + # write to file + cv2.imwrite(filename, self._catch_img[min_pt[1]:max_pt[1], + min_pt[0]:max_pt[0]]) + + self.caught_fish.extend(caught_fish) + self.fish = free_fish + + @property + def _fish_bboxes(self): + ''' Returns an array of the min_pts and max_pts of each fish. ''' + return np.c_[tuple(fish.bbox for fish in self.fish)] + + def toggle_gradient(self, vid=None): + ''' Toggle gradient display mode on or off. ''' + self._gradient ^= True + + def toggle_instructions(self, vid=None): + ''' Toggle the instructions display on or off. ''' + self._instructions_visible ^= True + + def preprocess(self, img): + ''' Light preprocessing. ''' + self._t += 1 + max_accel = 30 + + blur = cv2.GaussianBlur(img, (7,7), 0) + flipped = cv2.flip(blur, 1) # mirror webcam + grey = np.float32(cv2.cvtColor(flipped, cv2.COLOR_BGR2GRAY)) / 255 + + # calculate acceleration from difference to previous image + diff = (grey - self._prev + 1) / 2 + x_accel = cv2.Sobel(diff, cv2.CV_64F, 1, 0, ksize=5) + x_accel /= (1e-10+x_accel.max()-x_accel.min()) / max_accel + y_accel = cv2.Sobel(diff, cv2.CV_64F, 0, 1, ksize=5) + y_accel /= (1e-10+y_accel.max()-y_accel.min()) / max_accel + self._acceleration = x_accel, y_accel + self._prev = grey + + return flipped + + def __call__(self, flipped): + ''' Main processing, while waiting for next image. 
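+
+        In gradient mode the x and y Sobel acceleration maps are jointly
+        normalised as (v - min) / (max - min), blended 50:50 with
+        cv2.addWeighted, and shown as 3-channel greyscale; otherwise the
+        frame is tinted with the animated 'water' and the fish drawn on top.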
+        '''
+        if self._gradient:
+            x_accel, y_accel = self._acceleration
+            max_val = np.max([x_accel.max(), y_accel.max()])
+            min_val = np.min([x_accel.min(), y_accel.min()])
+            range_ = max_val - min_val
+            x_norm = (x_accel - min_val) / range_
+            y_norm = (y_accel - min_val) / range_
+            gradient = cv2.addWeighted(x_norm, 0.5, y_norm, 0.5, 0.0)
+            flipped = cv2.merge([np.uint8(255 * gradient)]*3)
+        else:
+            self._draw_water(flipped)
+
+        self._draw_fish(flipped, self._acceleration)
+        self._img = flipped
+        self._text_overlay(flipped)
+        self._draw_net(flipped)
+
+        return flipped
+
+    def _draw_water(self, img):
+        # make some blue and green that varies a bit with time
+        blue = np.zeros(img.shape, dtype=img.dtype)
+        mag = 30 * np.sin(self._t/100)
+        blue[:,:,0] = 200 + mag * np.sin(np.arange(img.shape[1])/(mag/6+50))
+        blue[:,:,1] = 40 + mag * np.sin(np.arange(img.shape[0])[:,None] \
+                                        / (mag/6+50))
+
+        # blend with the background image
+        alpha = 0.45
+        beta = 1 - alpha
+        cv2.addWeighted(img, alpha, blue, beta, 0.0, img)
+
+    def _draw_fish(self, img, acceleration):
+        ''' Draw in all the free and caught fish. '''
+        for fish in self.fish:
+            fish.draw(img, acceleration)
+        for fish in self.caught_fish:
+            fish.draw(img, None) # caught fish don't move, so no acceleration
+
+    def _text_overlay(self, img):
+        ''' Show instructions or how many fish have been caught + stats. '''
+        if self._instructions_visible:
+            cv2.addWeighted(img, 0.3, self._instructions, 0.7, 0, img)
+            return
+
+        caught_fish = len(self.caught_fish)
+        texts = [f'Caught {caught_fish}/{self._num_fish}']
+        if self._attempts:
+            texts.append(f'Hit rate: {100 * caught_fish / self._attempts:.2f}%')
+        if caught_fish:
+            texts.append(f'Avg size ratio: {self._precision / caught_fish:.3f}')
+
+        for index, text in enumerate(texts):
+            cv2.putText(img, text, (10, 20*(index+1)), cv2.FONT_HERSHEY_SIMPLEX,
+                        0.6, (255,255,255), 1)
+
+    def _draw_net(self, img):
+        ''' Draws the 'catch' net if one is in progress. '''
+        if self._end_point:
+            # make thicker lines for larger net
+            thickness = 1 + \
+                sum(abs(self._end_point[index] - self._start_point[index])
+                    for index in range(2)) // 90
+            cv2.rectangle(img, self._start_point, self._end_point, (0,0,100),
+                          thickness)
+
+
+if __name__ == '__main__':
+    # only start the tank when executed directly (e.g. via
+    # python3 -m pcv.something_fishy), not when imported
+    tank = FishTank((720,1280), name_file='names.txt')
+    window = 'Fish Tank'
+    with LockedCamera(0, preprocess=tank.preprocess, process=tank,
+                      display=window,
+                      play_commands={ord('g'):tank.toggle_gradient,
+                                     ord('i'):tank.toggle_instructions}) as cam:
+        cam.record_stream('lol.mp4',
+                          mouse_handler=MouseCallback(window,
+                                                      tank.mouse_handler))
diff --git a/build/lib/pcv/vidIO.py b/build/lib/pcv/vidIO.py
new file mode 100644
index 0000000..6829c9b
--- /dev/null
+++ b/build/lib/pcv/vidIO.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/env python3
+
+import cv2
+import signal
+import numpy as np
+from time import perf_counter, sleep
+from queue import Queue
+from threading import Thread, Event
+from pcv.interact import DoNothing, waitKey
+
+
+class BlockingVideoWriter(cv2.VideoWriter):
+    ''' A cv2.VideoWriter with a context manager for releasing.
+
+    Generally suggested to use the non-blocking, threaded VideoWriter class
+    instead, unless your application requires no wait time on completion
+    but permits performance reduction throughout to write frames. If that's
+    the case, try VideoWriter anyway, and come back to this if a notable
+    backlog occurs (it will tell you).
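+
+    Minimal usage sketch (filename, codec, and sizes here are illustrative,
+    not defaults):
+
+        with BlockingVideoWriter('out.avi', 'XVID', fps=30,
+                                 frameSize=(640, 480)) as writer:
+            writer.write(frame)  # 'frame' is a 640x480 BGR image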
+
+    '''
+    properties = {
+        'quality'    : cv2.VIDEOWRITER_PROP_QUALITY,
+        'framebytes' : cv2.VIDEOWRITER_PROP_FRAMEBYTES,
+        'nstripes'   : cv2.VIDEOWRITER_PROP_NSTRIPES,
+    }
+
+    # functioning combinations are often hard to find - these hopefully work
+    SUGGESTED_CODECS = {
+        'avi' : ['H264','X264','XVID','MJPG'],
+        'mp4' : ['avc1','mp4v'],
+        'mov' : ['avc1','mp4v'],
+        'mkv' : ['H264'],
+    }
+
+    def __init__(self, filename, fourcc, fps, frameSize, isColor=True,
+                 apiPreference=None):
+        ''' Initialise a BlockingVideoWriter with the given parameters.
+
+        'filename' is the video file to write to.
+        'fourcc' is the "four character code" representing the writing codec.
+            Can be a four character string, or an int as returned by
+            cv2.VideoWriter_fourcc. As functioning combinations of file
+            extension + codec can be difficult to find, the helper method
+            VideoWriter.suggested_codec is provided, accepting a filename
+            (or file extension) and a list of previously tried codecs that
+            didn't work and should be excluded. Suggested codecs are populated
+            from VideoWriter.SUGGESTED_CODECS, if you wish to view the
+            suggested options directly.
+        'fps' is the framerate (frames per second) to save as. It is a constant
+            float, and can only be set on initialisation. To have a video that
+            plays back faster than the recording stream, set the framerate to
+            higher than the input framerate. The VideoWriter.from_camera
+            factory function is provided to create a video-writer directly
+            from a camera instance, and allows measuring of the input framerate
+            for accurate output results if desired.
+        'frameSize' is the size of the input frames as a tuple of
+            (width, height), as expected by cv2.VideoWriter.
+        'isColor' is a boolean specifying if the saved images are coloured.
+            Defaults to True. Set False for greyscale input streams.
+        'apiPreference' allows specifying which API backend to use. It can be
+            used to enforce a specific reader implementation if multiple are
+            available (e.g. cv2.CAP_FFMPEG or cv2.CAP_GSTREAMER). Generally
+            this is not required, and if left as None it is ignored.
+
+        '''
+        self.filename = filename
+        self.fps = fps
+        self.is_color = isColor
+        self.frame_size = frameSize
+        self.api_preference = apiPreference
+        self.set_fourcc(fourcc)
+
+        super().__init__(*self._construct_open_args())
+
+    def set_fourcc(self, fourcc):
+        ''' Set fourcc code as an integer or an iterable of 4 chars. '''
+        self.fourcc = fourcc # save for checking value
+        if not isinstance(fourcc, int):
+            # assume iterable of 4 chars
+            fourcc = cv2.VideoWriter_fourcc(*fourcc)
+        self._fourcc = fourcc
+
+    def _construct_open_args(self):
+        args = [self.filename, self._fourcc, self.fps, self.frame_size,
+                self.is_color]
+        if self.api_preference is not None:
+            args = [args[0], self.api_preference, *args[1:]]
+        return args
+
+    def __enter__(self):
+        ''' Re-entrant '''
+        if not self.isOpened():
+            self.open(*self._construct_open_args())
+        return self
+
+    def __exit__(self, *args):
+        self.release()
+
+    def get(self, property):
+        ''' Returns 'property' value, or 0 if not supported by the backend.
+
+        'property' can be a string key for the VideoWriter.properties
+        dictionary or an integer from cv2.VIDEOWRITER_PROP_*
+
+        self.get(str/int) -> float
+
+        '''
+        try:
+            return super().get(self.properties[property.lower()])
+        except AttributeError:
+            return super().get(property)
+
+    def set(self, property, value):
+        ''' Attempts to set the specified property value.
+        Returns True if the property is supported by the backend in use.
+
+        'property' can be a string key for the VideoWriter.properties
+        dictionary or an integer from cv2.VIDEOWRITER_PROP_*
+        'value' should be a float
+
+        self.set(str/int, float) -> bool
+
+        '''
+        try:
+            return super().set(self.properties[property.lower()], value)
+        except AttributeError:
+            return super().set(property, value)
+
+    @classmethod
+    def suggested_codec(cls, filename, exclude=[]):
+        extension = filename.split('.')[-1]
+        try:
+            return [codec for codec in cls.SUGGESTED_CODECS[extension.lower()]
+                    if codec not in exclude][0]
+        except IndexError:
+            raise Exception('No codecs available, try a different extension')
+
+    @classmethod
+    def from_camera(cls, filename, camera, fourcc=None, isColor=True,
+                    apiPreference=None, fps=-3):
+        ''' Returns a VideoWriter based on the properties of the input camera.
+
+        'filename' is the name of the file to save to.
+        'camera' is the SlowCamera instance (or any of its subclasses).
+        'fourcc' is the codec four-character code. If left as None it is
+            determined automatically from filename.
+        'isColor' specifies if the video stream is colour or greyscale.
+        'fps' can be set as a float, 'camera' to ask the camera for the value,
+            or a negative integer to measure over that number of frames.
+            If no processing is occurring, 'camera' is suggested, otherwise
+            it is generally best to measure the frame output.
+            Defaults to -3, to measure over 3 frames.
+
+        '''
+        if fourcc is None:
+            fourcc = cls.suggested_codec(filename)
+        frameSize = tuple(int(camera.get(dim)) for dim in ('width','height'))
+
+        if fps == 'camera':
+            fps = camera.get('fps')
+        elif fps < 0:
+            fps = camera.measure_framerate(-fps)
+
+        return cls(filename, fourcc, fps, frameSize, isColor, apiPreference)
+
+    def __repr__(self):
+        return (f'{self.__class__.__name__}(filename={repr(self.filename)}, '
+                f'fourcc={repr(self.fourcc)}, fps={self.fps}, '
+                f'frameSize={self.frame_size}, isColor={self.is_color}, '
+                f'apiPreference={self.api_preference})')
+
+
+class VideoWriter(BlockingVideoWriter):
+    ''' A non-blocking thread-based video writer, using a queue. '''
+    def __init__(self, *args, maxsize=0, verbose_exit=True, **kwargs):
+        ''' Initialise the video writer.
+
+        'maxsize' is the maximum allowed frame buildup before adding frames
+            blocks execution. Defaults to 0 (no maximum). Set a meaningful
+            number if you have fast processing, limited memory, and can't
+            afford the time required to wait at the end once you've finished
+            recording. Setting a number for this is helpful in early testing
+            to get notified of cases where writing to disk is a bottleneck
+            (you may get processing freezes from time to time as a result).
+            Consistently slow write times may indicate a need for a more
+            efficient file format, memory type, or just lower resolution in
+            time or space (ie fewer fps or smaller images).
+        'verbose_exit' is a boolean indicating if the writer should notify
+            you on exit if a backlog wait will be required, and if so once it
+            completes and how long it took. Defaults to True.
+
+        *args and **kwargs are the same as those for BlockingVideoWriter.
+
+        '''
+        super().__init__(*args, **kwargs)
+        self._initialise_writer(maxsize)
+        self._verbose_exit = verbose_exit
+
+    def _initialise_writer(self, maxsize):
+        ''' Start the Thread for writing images.
+        '''
+        self.max_queue_size = maxsize
+        self._write_queue = Queue(maxsize=maxsize)
+        self._image_writer = Thread(name='writer', target=self._writer,
+                                    daemon=True)
+        self._image_writer.start()
+
+    def _writer(self):
+        ''' Write queued frames until program exit (daemon thread). '''
+        while "not finished":
+            # retrieve an image, wait indefinitely if necessary
+            img = self._write_queue.get()
+            # write the image to file ('where' is specified outside)
+            super().write(img)
+            # inform the queue that a frame has been written
+            self._write_queue.task_done()
+
+    def write(self, img):
+        ''' Send 'img' to the write queue. '''
+        self._write_queue.put(img)
+
+    def __exit__(self, *args):
+        ''' Wait for writing to complete, and release writer. '''
+        # assume not waiting
+        waited = False
+
+        # check if waiting required
+        if self._verbose_exit and not self._write_queue.empty():
+            print(f'Writing {self._write_queue.qsize()} remaining frames.')
+            print('Force quitting may result in a corrupted video file.')
+            waited = perf_counter()
+
+        # finish writing all frames
+        self._write_queue.join()
+
+        # cleanup as normal
+        super().__exit__(*args)
+
+        # if wait occurred, inform of completion
+        if waited and self._verbose_exit:
+            print(f'Writing complete in {perf_counter()-waited:.3f}s.')
+
+
+class OutOfFrames(StopIteration):
+    def __init__(self, msg='Out of video frames', *args, **kwargs):
+        super().__init__(msg, *args, **kwargs)
+
+
+class UserQuit(StopIteration):
+    def __init__(self, msg='User quit manually', *args, **kwargs):
+        super().__init__(msg, *args, **kwargs)
+
+
+class ContextualVideoCapture(cv2.VideoCapture):
+    ''' A cv2.VideoCapture with a context manager for releasing. '''
+    properties = {
+        'fps'     : cv2.CAP_PROP_FPS,
+        'mode'    : cv2.CAP_PROP_MODE,
+        'width'   : cv2.CAP_PROP_FRAME_WIDTH,
+        'height'  : cv2.CAP_PROP_FRAME_HEIGHT,
+        'backend' : cv2.CAP_PROP_BACKEND,
+    }
+    # more properties + descriptions can be found in the docs:
+    # https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html#gaeb8dd9c89c10a5c63c139bf7c4f5704d
+
+    def __init__(self, id, *args, display='frame', delay=None, quit=ord('q'),
+                 play_pause=ord(' '), pause_effects={}, play_commands={},
+                 destroy=-1, **kwargs):
+        ''' A pausable, quittable, iterable video-capture object
+            with context management.
+
+        'id' is the id that gets passed to the underlying VideoCapture object.
+            It can be an integer to select a connected camera, or a filename
+            to open a video.
+        'display' is used as the default window name when streaming. Defaults
+            to 'frame'.
+        'delay' is the integer millisecond delay applied between each iteration
+            to enable windows to update. If set to None, this is skipped and
+            the user must manually call waitKey to update windows.
+            Default is None, which allows headless operation without
+            unnecessary waiting.
+        'quit' is an integer ordinal corresponding to a key which can be used
+            to stop the iteration loop. Only applies if delay is not None.
+            Default is ord('q'), so press the 'q' key to quit when iterating.
+        'play_pause' is an integer ordinal corresponding to a key which can be
+            used to pause and resume the iteration loop. Only applies if delay
+            is not None. Default is ord(' '), so press space-bar to pause/
+            resume when iterating.
+        'pause_effects' is a dictionary of key ordinals and corresponding
+            handler functions. The handler will be passed self as its only
+            argument, which gives it access to the 'get' and 'set' methods,
+            as well as the 'status' and 'image' properties from the last
+            'read' call.
This can be useful for logging, selecting images for + labelling, or temporary manual control of the event/read loop. + Note that this is only used while paused, and does not get passed + quit or play_pause key events. + 'play_commands' is the same as 'pause_effects' but operates instead + while playback/streaming is occurring. For live processing, + this can be used to change playback modes, or more generally for + similar scenarios as 'pause_effects'. + 'destroy' destroys any specified windows on context exit. Can be 'all' + to destroy all active opencv windows, a string of a specific window + name, or a list of window names to close. If left as -1, destroys + the window specified in 'display'. + + ''' + super().__init__(id, *args, **kwargs) + self._id = id + self.display = display + self._delay = delay + self._quit = quit + self._play_pause = play_pause + self._pause_effects = pause_effects + self._play_commands = play_commands + self._destroy = destroy + + self._api_preference = kwargs.get('apiPreference', None) + + def __enter__(self): + ''' Enter a re-entrant context for this camera. ''' + if not self.isOpened(): + if self._api_preference: + self.open(self._id, self._api_preference) + else: + self.open(self._id) + + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + ''' Clean up on context exit. + + Releases the internal VideoCapture object, and destroys any windows + specified at initialisation. + + ''' + # release VideoCapture object + self.release() + + # clean up window(s) if specified on initialisation + destroy = self._destroy + try: + if destroy == -1: + cv2.destroyWindow(self.display) + elif destroy == 'all': + cv2.destroyAllWindows() + elif isinstance(destroy, str): + # a single window name + cv2.destroyWindow(destroy) + elif destroy is not None: + # assume an iterable of multiple windows + for window in destroy: cv2.destroyWindow(window) + else: + return # destroy is None + except cv2.error as e: + print('Failed to destroy window(s)', e) + + waitKey(1) # allow the GUI manager to update + + def __iter__(self): + return self + + def __next__(self): + # check if doing automatic waits + if self._delay is not None: + key = waitKey(self._delay) + + if key == self._quit: + raise UserQuit + elif key == self._play_pause: + self._handle_pause() + else: + # pass self to a triggered user-defined key handler, or nothing + self._play_commands.get(key, lambda cap: None)(self) + + # wait completed, get next frame if possible + if self.isOpened(): + return self.read() + raise OutOfFrames + + def _handle_pause(self): + ''' Handle event loop and key-presses while paused. ''' + while "paused": + key = waitKey(1) + if key == self._quit: + raise UserQuit + if key == self._play_pause: + break + # pass self to a triggered user-defined key handler, or do nothing + self._pause_effects.get(key, lambda cap: None)(self) + + def stream(self, mouse_handler=DoNothing()): + ''' Capture and display stream on window specified at initialisation. + + 'mouse_handler' is an optional MouseCallback instance determining + the effects of mouse clicks and moves during the stream. Defaults + to DoNothing. + + ''' + with mouse_handler: + for read_success, frame in self: + if read_success: + cv2.imshow(self.display, frame) + else: + break # camera disconnected + + def headless_stream(self): + ''' Capture and process stream without display. 
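+
+        Typical usage (sketch; 'my_process' stands in for a user-supplied
+        processing function):
+
+            with VideoReader('in.mp4', auto_delay=False,
+                             process=my_process) as vid:
+                vid.headless_stream()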
+        '''
+        for read_success, frame in self:
+            if not read_success: break # camera disconnected
+
+    def record_stream(self, filename, show=True, mouse_handler=DoNothing()):
+        ''' Capture and record stream, with optional display.
+
+        'filename' is the file to save to.
+        'show' is a boolean specifying if the result is displayed (on the
+            window specified at initialisation).
+        'mouse_handler' is an optional MouseCallback instance determining
+            the effects of mouse clicks and moves during the stream. It is
+            only useful if 'show' is set to True. Defaults to DoNothing.
+
+        '''
+        with VideoWriter.from_camera(filename, self) as writer, mouse_handler:
+            for read_success, frame in self:
+                if read_success:
+                    if show:
+                        cv2.imshow(self.display, frame)
+                    writer.write(frame)
+                else:
+                    break # camera disconnected
+
+    def get(self, property):
+        ''' Return the value of 'property' if it exists, else 0.0. '''
+        try:
+            return super().get(self.properties.get(property, property))
+        except TypeError: # property must be an unknown string
+            # look up unknown strings as cv2.CAP_PROP_* attributes
+            return super().get(getattr(cv2, 'CAP_PROP_' + property.upper()))
+
+    def set(self, property, value):
+        ''' Attempts to set 'property' to 'value', returning success. '''
+        try:
+            return super().set(self.properties.get(property, property), value)
+        except TypeError: # 'property' must be an unknown string
+            # look up unknown strings as cv2.CAP_PROP_* attributes
+            return super().set(getattr(cv2, 'CAP_PROP_' + property.upper()),
+                               value)
+
+    def read(self, image=None):
+        ''' Read the next frame, storing status/image on self. '''
+        if image is not None:
+            status, image = super().read(image)
+        else:
+            status, image = super().read()
+        self.status, self.image = status, image
+        return status, image
+
+
+class SlowCamera(ContextualVideoCapture):
+    ''' A basic, slow camera class for processing frames relatively far apart.
+
+    Use 'Camera' instead unless you need to reduce power/CPU usage and the time
+    to read an image is insignificant in your processing pipeline.
+
+    '''
+    def __init__(self, camera_id=0, *args, delay=1, **kwargs):
+        ''' Create a camera capture instance with the given id.
+
+        Arguments are the same as ContextualVideoCapture, but 'id' is replaced
+        with 'camera_id', and 'delay' is set to 1 by default instead of
+        None.
+
+        '''
+        super().__init__(camera_id, *args, delay=delay, **kwargs)
+
+    def measure_framerate(self, frames):
+        ''' Measure framerate for specified number of frames. '''
+        count = 0
+        for read_success, frame in self:
+            if self.display:
+                cv2.imshow(self.display, frame)
+            count += 1
+            if count == 1:
+                start = perf_counter() # avoid timing opening the window
+            if count > frames:
+                # desired frames reached, set fps as average framerate
+                return count / (perf_counter() - start)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}(camera_id={repr(self._id)})"
+
+
+class Camera(SlowCamera):
+    ''' A camera for always capturing the latest frame, fast.
+
+    Use this instead of 'SlowCamera', unless you need to reduce power/CPU
+    usage, and the time to read an image is insignificant in your processing
+    pipeline.
+
+    '''
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._initialise_grabber()
+
+    def _initialise_grabber(self):
+        ''' Start the Thread for grabbing images. '''
+        self._finished = Event()
+        self._image_grabber = Thread(name='grabber', target=self._grabber,
+                                     daemon=True) # auto-kill when finished
+        self._image_grabber.start()
+        self._wait_for_grabber_start()
+
+    def _grabber(self):
+        ''' Grab images as fast as possible - only latest gets processed.
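+
+        Relies on OpenCV's grab()/retrieve() split: grab() cheaply pulls
+        a frame off the driver buffer in this thread, while the expensive
+        decode only happens in read()/retrieve() for frames actually used.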
''' + while not self._finished.is_set(): + self.grab() + + def _wait_for_grabber_start(self): + ''' Waits for a successful retrieve. Raises Exception after 50 attempts. ''' + for check in range(50): + if self.retrieve()[0]: break + sleep(0.1) + else: + raise Exception(f'Failed to start {self.__class__.__name__}') + + def __exit__(self, *args): + self._finished.set() + self._image_grabber.join() + super().__exit__(*args) + + def read(self, image=None): + ''' Read and return the latest available image. ''' + if image is not None: + status, image = self.retrieve(image) + else: + status, image = self.retrieve() + self.status, self.image = status, image + return status, image + + +class LockedCamera(Camera): + ''' A camera for semi-synchronously capturing a single image at a time. + + Like 'Camera' but uses less power+CPU by only capturing images on request. + Allows specifying when each image should start being captured, then doing + some processing while the image is being grabbed and decoded (and + optionally pre-processed), before using it. + + Images may be less recent than achieved with Camera, depending on when the + user starts the capture process within their processing pipeline, but can + also be more recent if started near the end of the pipeline (at the risk of + having to wait for the capturing to complete). + + ''' + def __init__(self, *args, preprocess=lambda img:img, + process=lambda img:img, **kwargs): + ''' Create a camera capture instance with the given id. + + 'preprocess' is an optional function which takes an image and returns + a modified image, which gets applied to each frame on read. + Defaults to no preprocessing. + 'process' is an optional function which takes an image and returns + a modified image, which gets applied to each preprocessed frame + after the next frame has been requested. Defaults to no processing. + + *args and **kwargs are the same as for Camera. + + ''' + super().__init__(*args, **kwargs) + self._preprocess = preprocess + self._process = process + self._get_latest_image() # start getting the first image + + def _initialise_grabber(self): + ''' Create locks and start the grabber thread. ''' + self._image_desired = Event() + self._image_ready = Event() + super()._initialise_grabber() + + def _grabber(self): + ''' Grab and preprocess images on demand, ready for later usage ''' + while not self._finished.is_set(): + self._wait_until_needed() + # read the latest frame + read_success, frame = super(ContextualVideoCapture, self).read() + if not read_success: + raise IOError('Failure to read frame from camera.') + + # apply any desired pre-processing and store for main thread + self._preprocessed = self._preprocess(frame) + # inform that image is ready for access/main processing + self._inform_image_ready() + + def _wait_for_grabber_start(self): + ''' Not used - done automatically with Events. ''' + pass + + def _wait_until_needed(self): + ''' Wait for main to request the next image. ''' + self._image_desired.wait() + self._image_desired.clear() + + def _inform_image_ready(self): + ''' Inform main that next image is available. ''' + self._image_ready.set() + + def _get_latest_image(self): + ''' Ask camera handler for next image. ''' + self._image_desired.set() + + def _wait_for_camera_image(self): + ''' Wait until next image is available. 
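+
+        Called from read(), where one full handshake cycle looks like:
+
+            self._wait_for_camera_image()   # block until _image_ready
+            preprocessed = self._preprocessed
+            self._get_latest_image()        # set _image_desired for grabber
+            self.image = self._process(preprocessed)  # overlaps next grab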
''' + self._image_ready.wait() + self._image_ready.clear() + + def __exit__(self, *args): + self._finished.set() + self._image_desired.set() # allow thread to reach finished check + super().__exit__(*args) + + def read(self, image=None): + ''' For optimal usage, tune _process to take the same amount of time + as getting the next frame. + ''' + self._wait_for_camera_image() + preprocessed = self._preprocessed + self._get_latest_image() + self.image = self._process(preprocessed) + if image is not None: + image = self.image + self.status = True + return self.status, self.image + + +class VideoReader(LockedCamera): + ''' A class for reading video files. ''' + properties = { + **ContextualVideoCapture.properties, + 'frame' : cv2.CAP_PROP_POS_FRAMES, + 'codec' : cv2.CAP_PROP_FOURCC, + 'timestamp' : cv2.CAP_PROP_POS_MSEC, + 'num_frames' : cv2.CAP_PROP_FRAME_COUNT, + 'proportion' : cv2.CAP_PROP_POS_AVI_RATIO, + } + + FASTER, SLOWER, REWIND, FORWARD, RESET = (ord(key) for key in 'wsadr') + FORWARD_DIRECTION, REVERSE_DIRECTION = 1, -1 + MIN_DELAY = 1 # integer milliseconds + + def __init__(self, filename, *args, start=None, end=None, auto_delay=True, + fps=None, skip_frames=None, verbose=True, display='video', + **kwargs): + ''' Initialise a video reader from the given file. + + 'filename' is the string path of a video file. Depending on the file + format some features may not be available. + 'start' and 'end' denote the respective times of playback, according + to the specified fps. They can be integers of milliseconds, or + strings of 'hours:minutes:seconds' (larger amounts can be left off + if 0, e.g. '5:10.35' for no hours). If left as None, the video + starts and ends at the first and last frames respectively. + It is expected that 'start' < 'end', or playback ends immediately. + 'auto_delay' is a boolean specifying if the delay between frames should + be automatically adjusted during playback to match the specified + fps. Set to False if operating headless (not viewing the video), or + if manual control is desired while iterating over the video. + If set to False, sets 'destroy' to None if not otherwise set. + If True enables playback control with 'w' increasing playback + speed, 's' slowing it down, 'a' rewinding (only possible if + 'skip_frames' is True), and 'd' returning to forwards playback. + The 'r' key can be pressed to reset to 1x speed and forwards + direction playback. 'a' and 'd' can be used while paused to step + back and forwards, regardless of skip_frames. These defaults can be + overridden using the 'play_commands' and 'pause_effects' keyword + arguments, supplying a dictionary of key ordinals that sets the + desired behaviour. Note that the defaults are set internally, so to + turn them off the dictionary must be used, with e.g. + play_commands={ord('a'):lambda vid:None} to disable rewinding. + 'fps' is a float specifying the desired frames per second for playback. + If left as None the fps is read from file, or if that fails is set + to 25 by default. Value is ignored if 'auto_delay' is False. + 'skip_frames' allows frames to be manually set, as required by reverse + or high speed playback. If left as None this is disallowed. If + 'auto_delay' is True, any integer value can be set (suggested 0), + and the number of frames to skip at each iteration is determined + as part of the delay tuning. If 'auto_delay' is False, an integer + can be used as a consistent number of frames to skip at each + iteration (e.g. only read every 10th frame). 
Note that enabling + frame skipping can make playback jerky on devices and/or file + formats with slow video frame setting times, and inconsistent + skipping amounts with 'auto_delay' may cause issues with + time-dependent processing. + 'verbose' is a boolean determining if playback speed and direction + changes are printed to the terminal. Defaults to True. + + *args and **kwargs get passed up the inheritance chain, with notable + keywords including the 'preprocess' and 'process' functions which + take an image and return a processed result (see LockedCamera), + the 'quit' and 'play_pause' key ordinals which are checked if + 'auto_delay' is True, and the 'play_commands' and 'pause_effects' + dictionaries mapping key ordinals to desired functionality while + playing and paused (see ContextualVideoCapture documentation for + details). + + ''' + super().__init__(filename, *args, display=display, **kwargs) + self.filename = filename + self._fps = fps or self.fps or 25 # user-specified or auto-retrieved + self._period = 1e3 / self._fps + self._initialise_delay(auto_delay) + self._initialise_playback(start, end, skip_frames, verbose) + + def _initialise_delay(self, auto_delay): + ''' Determines the delay automatically, or leaves as None. ''' + if auto_delay: + if self._fps == 0 or self._fps >= 1e3: + print('failed to determine fps, setting to 25') + self._period = 1e3 / 25 + # set a bit low to allow image read times + self._delay = self._period - 5 + else: + self._delay = int(self._period) + print('delay set automatically to', + f'{self._delay}ms from fps={self._fps}') + else: + self._delay = None + if self._destroy == -1: + self._destroy = None + + def _initialise_playback(self, start, end, skip_frames, verbose): + ''' Set up playback settings as specified. ''' + self._wait_for_camera_image() # don't set frame while grabber running + + self._set_start(start) + self._set_end(end) + + self._skip_frames = skip_frames + self._verbose = verbose + self._direction = self.FORWARD_DIRECTION + self._speed = 1 + self._adjusted_period = self._period + self._calculate_frames() + + self._play_commands = { + self.FASTER : self._speed_up, + self.SLOWER : self._slow_down, + self.REWIND : self._go_back, + self.FORWARD : self._go_forward, + self.RESET : self._reset, + **self._play_commands + } + + # add step back and forward functionality if keys not already used + self._pause_effects = { + self.REWIND : self.step_back, + self.FORWARD : self.step_forward, + **self._pause_effects + } + + # ensure time between frames is ignored while paused + class LogDict(dict): + def get(this, *args, **kwargs): + self._prev = perf_counter() - (self._period - self.MIN_DELAY) / 1e3 + return dict.get(this, *args, **kwargs) + + self._pause_effects = LogDict(self._pause_effects) + + self._get_latest_image() # re-initialise as ready + + def _set_start(self, start): + ''' Set the start of the video to user specification, if possible. ''' + if start is not None: + if self.set_timestamp(start): + print(f'starting at {start}') + else: + print('start specification failed, starting at 0:00') + self._frame = 0 + else: + self._frame = 0 + + def _set_end(self, end): + ''' Set playback to end where specified by user. ''' + if end is not None: + if isinstance(end, str): + self._end = self.timestamp_to_ms(end) + else: + self._end = end + self._end /= self._period # convert to number of frames + else: + self._end = np.inf + + @staticmethod + def _speed_up(vid): + ''' Increase the speed by 10% of the initial value. 
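+
+        e.g. with fps=25 the base period is 40ms, so after five presses
+        (speed 1.5x) the adjusted period becomes 40 / 1.5 ~= 26.7ms.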
''' + vid._speed += 0.1 + vid._register_speed_change() + + @staticmethod + def _slow_down(vid): + ''' Reduce the speed by 10% of the initial value. ''' + vid._speed -= 0.1 + vid._register_speed_change() + + def _register_speed_change(self): + ''' Update internals and print new speed. ''' + self._calculate_period() + if self._verbose: + print(f'speed set to {self._speed:.1f}x starting fps') + + def _calculate_period(self): + ''' Determine the adjusted period given the speed. ''' + self._adjusted_period = self._period / self._speed + self._calculate_timestep() + + def _calculate_timestep(self): + ''' Determine the desired timestep of each iteration. ''' + self._timestep = self._adjusted_period * self._frames + + def _calculate_frames(self): + ''' Determine the number of frames to increment each iteration. ''' + self._frames = (1 + self._skip_frames + if self._skip_frames is not None + else 1) + self._calculate_timestep() + + @staticmethod + def _go_back(vid): + ''' Set playback to backwards. ''' + if vid._skip_frames is not None: + vid._direction = vid.REVERSE_DIRECTION + if vid._verbose: + print('Rewinding') + else: + if vid._verbose: + print('Cannot go backwards without skip_frames=True') + + @staticmethod + def _go_forward(vid): + ''' Set playback to go forwards. ''' + vid._direction = vid.FORWARD_DIRECTION + if vid._verbose: + print('Going forwards') + + @staticmethod + def _reset(vid): + ''' Restore playback to 1x speed and forwards. ''' + vid._speed = 1 + vid._direction = vid.FORWARD_DIRECTION + vid._calculate_period() + if vid._verbose: + print(f'Going forwards with speed set to 1x starting fps') + + @staticmethod + def step_back(vid): + ''' Take a step backwards. ''' + # store existing state + old_state = (vid._skip_frames, vid._direction, vid._verbose) + + # enable back-stepping if not currently permitted + vid._skip_frames = 0 + # make sure no unnecessary prints trigger from playback keys + vid._verbose = False + + # go back a step + vid._direction = vid.REVERSE_DIRECTION + next(vid) + + # restore state + vid._skip_frames, vid._direction, vid._verbose = old_state + + @staticmethod + def step_forward(vid): + ''' Take a step forwards. ''' + # store existing state + old_state = (vid._direction, vid._verbose) + + # make sure no unnecessary prints trigger from playback keys + vid._verbose = False + + # go forwards a step + vid._direction = vid.FORWARD_DIRECTION + next(vid) + + # restore state + vid._direction, vid._verbose = old_state + + @property + def fps(self): + ''' The constant FPS assumed of the video file. ''' + return self.get('fps') + + @property + def frame(self): + ''' Retrieve the current video frame. ''' + self._frame = int(self.get('frame')) + return self._frame + + def set_frame(self, frame): + ''' Attempts to set the frame number, returns success. + + 'frame' is an integer greater than 0. Setting past the last frame + either has no effect or ends the playback. + + self.set_frame(int) -> bool + + ''' + if self.set('frame', frame): + self._frame = frame + return True + return False + + @property + def timestamp(self): + ''' Returns the video timestamp if possible, else 0.0. + + Returns a human-readable time string, as hours:minutes:seconds. + For the numerical ms value use self.get('timestamp') instead. 
+ + self.timestamp -> str + + ''' + # cv2.VideoCapture returns ms timestamp -> convert to meaningful time + seconds = self.get('timestamp') / 1000 + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + if hours: + return f'{hours}:{minutes}:{seconds:.3f}' + if minutes: + return f'{minutes}:{seconds:.3f}' + return f'{seconds:.3f}s' + + def set_timestamp(self, timestamp): + ''' Attempts to set the timestamp as specified, returns success. + + 'timestamp' can be a float/integer of milliseconds, or a string + of 'hours:minutes:seconds', 'minutes:seconds', or 'seconds', + where all values can be integers or floats. + + self.set_timestamp(str/float/int) -> bool + + ''' + ms = self.timestamp_to_ms(timestamp) if isinstance(timestamp, str) \ + else timestamp + fps = self._fps + if fps == 0: + # fps couldn't be determined - set ms directly and hope + return self.set('timestamp', ms) + return self.set_frame(int(ms * fps / 1e3)) + + @staticmethod + def timestamp_to_ms(timestamp): + ''' Converts a string timestamp of hours:minutes:seconds to ms.''' + return 1000 * sum(60 ** index * float(period) for index, period \ + in enumerate(reversed(timestamp.split(':')))) + + def __iter__(self): + if self._delay is not None: + self._prev = perf_counter() + self._error = 0 + self._delay = 1 + return self + + def __next__(self): + if self._delay is not None: + # auto-adjust to get closer to desired fps + now = perf_counter() + diff = 1e3 * (now - self._prev) # s to ms + self._error += diff - self._timestep + + self._update_playback_settings() + + self._prev = now + self._update_frame_tracking() + + return super().__next__() + + def _update_playback_settings(self): + ''' Adjusts delay/frame skipping if error is sufficiently large. ''' + error_magnitude = abs(self._error) + if error_magnitude > self.MIN_DELAY: + # determine distribution of change + if self._skip_frames is not None: + # can only skip full frames, rest left to delay + skip_frames_change, delay_change = \ + divmod(error_magnitude, self._adjusted_period) + else: + delay_change = error_magnitude + # can only delay in MIN_DELAY increments, remainder is error + delay_change, new_error_mag = \ + divmod(delay_change, self.MIN_DELAY) + + # determine if going too slowly (+) or too fast (-) + sign = np.sign(self._error) + # implement delay (and skip frames) change + # reducing delay increases speed + self._delay -= int(sign * delay_change) + if self._skip_frames is not None: + # skipping additional frames increases speed + self._skip_frames += int(sign * skip_frames_change) + self._calculate_frames() # update internals + + self._error = sign * new_error_mag + if self._delay < self.MIN_DELAY: + self._error += self.MIN_DELAY - self._delay + self._delay = self.MIN_DELAY + + def _update_frame_tracking(self): + # frame skip with no auto-delay allows continual frame skipping + # only set frame if necessary (moving one frame ahead isn't helpful) + if self._skip_frames is not None and \ + (self._direction == -1 or self._frames != 1): + self._image_ready.wait() + self.set_frame(self._frame + self._frames * self._direction) + else: + self._frame += 1 + + if self._frame > self._end: + raise OutOfFrames + + def __repr__(self): + return f"{self.__class__.__name__}(filename={repr(self.filename)})" + + +if __name__ == '__main__': + with Camera(0) as cam: + cam.stream() + diff --git a/dist/pythonic-cv-1.1.3.tar.gz b/dist/pythonic-cv-1.1.3.tar.gz new file mode 100644 index 0000000..676062e Binary files /dev/null and b/dist/pythonic-cv-1.1.3.tar.gz 
differ diff --git a/dist/pythonic_cv-1.1.3-py3-none-any.whl b/dist/pythonic_cv-1.1.3-py3-none-any.whl new file mode 100644 index 0000000..f7ae3e4 Binary files /dev/null and b/dist/pythonic_cv-1.1.3-py3-none-any.whl differ diff --git a/pythonic_cv.egg-info/PKG-INFO b/pythonic_cv.egg-info/PKG-INFO new file mode 100644 index 0000000..ef56e40 --- /dev/null +++ b/pythonic_cv.egg-info/PKG-INFO @@ -0,0 +1,155 @@ +Metadata-Version: 2.1 +Name: pythonic-cv +Version: 1.1.3 +Summary: Performant pythonic wrapper of unnecessarily painful opencv functionality +Home-page: https://github.com/ES-Alexander/pythonic-cv +Author: ES-Alexander +Author-email: sandman.esalexander@gmail.com +License: UNKNOWN +Description: _________________________________ + Version: 1.1.3 + Author: ES Alexander + Release Date: 19/Sep/2020 + _________________________________ + + # About + OpenCV is a fantastic tool for computer vision, with significant Python support + through automatically generated bindings. Unfortunately some basic functionality + is frustrating to use, and documentation is sparse and fragmented as to how best to + approach even simple tasks such as efficiently processing a webcam feed. + + This library aims to address frustrations in the OpenCV Python api that can be + fixed using pythonic constructs and methodologies. Solutions are not guaranteed to + be optimal, but every effort has been made to make the code as performant as + possible while ensuring ease of use and helpful errors/documentation. + + # Requirements + This library requires an existing version of `OpenCV` with Python bindings to be + installed (e.g. `python3 -m pip install opencv-python`). Some features (mainly + property access helpers) may not work for versions of OpenCV earlier than 4.2.0. + The library was tested using Python 3.7.2, and is expected to work down to at least + Python 3.4 (although the integrated advanced features example uses matmul (@) for + some processing, which was introduced in Python 3.5). + + `Numpy` is also used throughout, so a recent version is suggested (tested with 1.19.0). + + # Installation + The library can be installed from pip, with `python3 -m pip install pythonic-cv`. + + # Usage + New functionality is provided in the `pcv` module, as described below. All other + opencv functionality should be accessed through the standard `cv2` import. + + The main implemented functionality is handling video through a context manager, + while also enabling iteration over the input stream. While iterating, key-bindings + have been set up for play/pause (`SPACE`) and stopping playback (`q`). A dictionary + of pause_effects can be passed in to add additional key-bindings while paused without + needing to create a subclass. In addition, video playback can be sped up with `w`, + slowed down with `s`, and if enabled allows rewinding with `a` and returning to + forwards playback with `d`. Forwards playback at 1x speed can be restored with `r`. + While paused, video can be stepped backwards and forwards using `a` and `d`. All + default key-bindings can be overwritten using the play_commands and pause_effects + dictionaries and the quit and play_pause variables on initialisation. + + For reading and writing video files, the `VideoReader` and `VideoWriter` classes should + be used. For streaming, the classes `Camera`, `SlowCamera`, and `LockedCamera` are + provided. The simplest of these is `SlowCamera`, which has slow iteration because image + grabbing is performed synchronously, with a blocking call while reading each frame. 
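+        
+        A minimal `SlowCamera` loop looks like this (illustrative sketch):
+        
+        ```python
+        from pcv.vidIO import SlowCamera
+        
+        with SlowCamera(0) as cam:
+            for read_success, frame in cam:
+                if not read_success:
+                    break  # camera disconnected
+                ...        # process 'frame' synchronously here
+        ```
+        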
+        `Camera` extends `SlowCamera` with additional logic to perform repeated
+        grabbing in a separate thread, so processing and image grabbing can occur
+        concurrently. `LockedCamera` sits between the two, providing thread-based I/O
+        but with more control over when each image is taken.
+
+        `Camera` is most useful for applications with processing speeds that require
+        the most up-to-date information possible, and that don't want to waste time
+        decoding frames that are grabbed too early to be processed (frame grabbing
+        occurs in a separate thread, and only the latest frame is retrieved (decoded)
+        on read). `SlowCamera` should only be used where power consumption or overall
+        CPU usage are more important than fast processing, or on hardware that is only
+        capable of single-thread execution, in which case the separate image-grabbing
+        thread would only serve to slow things down.
+
+        `LockedCamera` is intended to work asynchronously like `Camera`, but with more
+        control. It allows the user to specify when the next image should be taken,
+        which wastes less CPU and power on grabbing frames that aren't used, while
+        still leaving time for the image to be grabbed and decoded before the next
+        iteration needs to start. The locking protocol adds a small amount of
+        additional syntax, and starting the image-grabbing process too late in an
+        iteration can result in waits similar to those in `SlowCamera`, while starting
+        it too early can result in images that are somewhat out of date. Tuning can be
+        done using the 'preprocess' and 'process' keyword arguments, with an in-depth
+        usage example provided in `something_fishy.py`. When used correctly,
+        `LockedCamera` has the fastest iteration times; alternatively, if delays are
+        used to slow down the process, it can have CPU and power usage similar to that
+        of `SlowCamera`.
+
+        If using a video file to simulate a live camera stream, use `SlowCamera` or
+        `LockedCamera` - `Camera` will skip frames.
+
+        ## Overview
+        ![Overview of classes diagram](https://github.com/ES-Alexander/pythonic-cv/blob/master/Overview.png)
+
+        ## Examples
+        ### Basic Camera Stream
+        ```python
+        from pcv.vidIO import Camera, LockedCamera
+        from pcv.process import channel_options, downsize
+
+        # start streaming camera 0 (generally laptop webcam/primary camera), and
+        # destroy the 'frame' window (default streaming window) when finished.
+        # Auto-initialised with a 1ms waitKey between iterations, breaking on 'q'
+        # key-press, and play/pause using the spacebar.
+        with Camera(0) as cam:
+            cam.stream()
+
+        # stream camera 0 on window 'channels', downsized and showing all available
+        # channels.
+        with LockedCamera(0, display='channels',
+                          process=lambda img: channel_options(downsize(img, 4))) as cam:
+            cam.stream()
+        ```
+
+        ### Stream and Record
+        ```python
+        from pcv.vidIO import Camera
+
+        with Camera(0) as cam:
+            print("press 'q' to quit and stop recording.")
+            cam.record_stream('me.mp4')
+        ```
+
+        ### VideoReader
+        ```python
+        from pcv.vidIO import VideoReader
+        from pcv.process import downsize
+
+        # just play (simple)
+        with VideoReader('my_vid.mp4') as vid:
+            vid.stream()
+
+        # start 15 seconds in, end at 1:32, downsize the video by a factor of 4
+        with VideoReader('my_vid.mp4', start='15', end='1:32',
+                         preprocess=lambda img: downsize(img, 4)) as vid:
+            vid.stream()
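+
+        # jump partway through before streaming (a sketch - set_timestamp accepts
+        # 'hours:minutes:seconds', 'minutes:seconds', or 'seconds' strings, or a
+        # number of milliseconds, and returns whether setting succeeded)
+        with VideoReader('my_vid.mp4') as vid:
+            vid.set_timestamp('1:30')
+            vid.stream()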
+
+        # enable rewinding and super-fast playback
+        # Press 'a' to rewind, 'd' to go forwards, 'w' to speed up, 's' to slow down,
+        # and 'r' to reset to forwards at 1x speed.
+        with VideoReader('my_vid.mp4', skip_frames=0) as vid:
+            vid.stream()
+
+        # headless mode (no display), operating on every 10th frame
+        with VideoReader('my_vid.mp4', auto_delay=False, skip_frames=10,
+                         process=my_processing_func) as vid:
+            vid.headless_stream()
+        ```
+
+        ### Advanced Example (something_fishy.py)
+        Copy the `names.txt` file to where you want to create your gallery (or create
+        your own `names.txt` with one name per line - of your friends and family, for
+        example), and run `python3 -m pcv.something_fishy` to start the example.
+        Several generally useful processing techniques are included, so take a look
+        through the code and find the functionality that's most interesting to you to
+        explore and modify.
+
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
diff --git a/pythonic_cv.egg-info/SOURCES.txt b/pythonic_cv.egg-info/SOURCES.txt
new file mode 100644
index 0000000..6810bb8
--- /dev/null
+++ b/pythonic_cv.egg-info/SOURCES.txt
@@ -0,0 +1,11 @@
+README.md
+setup.py
+pcv/__init__.py
+pcv/interact.py
+pcv/process.py
+pcv/something_fishy.py
+pcv/vidIO.py
+pythonic_cv.egg-info/PKG-INFO
+pythonic_cv.egg-info/SOURCES.txt
+pythonic_cv.egg-info/dependency_links.txt
+pythonic_cv.egg-info/top_level.txt
\ No newline at end of file
diff --git a/pythonic_cv.egg-info/dependency_links.txt b/pythonic_cv.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/pythonic_cv.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/pythonic_cv.egg-info/top_level.txt b/pythonic_cv.egg-info/top_level.txt
new file mode 100644
index 0000000..696f3df
--- /dev/null
+++ b/pythonic_cv.egg-info/top_level.txt
@@ -0,0 +1 @@
+pcv