From 982d28410b1c8f0c490c0b9f430a3afbd6dd88e8 Mon Sep 17 00:00:00 2001
From: Dave Thaler
Date: Mon, 22 Jul 2024 14:13:22 -0700
Subject: [PATCH] Add missing nodes to various places in InferenceSystem

Signed-off-by: Dave Thaler
---
 InferenceSystem/README.md                              |  2 +-
 .../config/Production/FastAI_LiveHLS_MastCenter.yml    | 10 +++++
 InferenceSystem/demos/hls_reader.py                    | 12 +++---
 InferenceSystem/deploy/mast-center.yaml                | 38 +++++++++++++++++++
 InferenceSystem/deploy/north-sjc.yaml                  | 38 +++++++++++++++++++
 .../src/LiveInferenceOrchestrator.py                   | 16 ++++----
 .../src/PrepareDataForPredictionExplorer.py            |  3 +-
 InferenceSystem/src/globals.py                         |  6 ++-
 InferenceSystem/src/model/datautils.py                 |  1 +
 9 files changed, 111 insertions(+), 15 deletions(-)
 create mode 100644 InferenceSystem/config/Production/FastAI_LiveHLS_MastCenter.yml
 create mode 100644 InferenceSystem/deploy/mast-center.yaml
 create mode 100644 InferenceSystem/deploy/north-sjc.yaml

diff --git a/InferenceSystem/README.md b/InferenceSystem/README.md
index f2fb087f..9c9ac248 100644
--- a/InferenceSystem/README.md
+++ b/InferenceSystem/README.md
@@ -176,7 +176,7 @@ docker build . -t live-inference-system -f ./Dockerfile
 
 Note: the config used in the Dockerfile is a Production config.
 
-TODO: fix. For now, you will have to manually create 5 different docker containers for the 5 hydrophone locations. Each time you will need to edit the Dockerfile and replace the config for each hydrophone location (OrcasoundLab, BushPoint, PortTownsend, Sunset Bay and Point Robinson).
+TODO: fix. For now, you will have to manually create a different docker container for each hydrophone location, editing the Dockerfile each time to use that location's config.
 
 ## Running the docker container
 
diff --git a/InferenceSystem/config/Production/FastAI_LiveHLS_MastCenter.yml b/InferenceSystem/config/Production/FastAI_LiveHLS_MastCenter.yml
new file mode 100644
index 00000000..a7e68fd7
--- /dev/null
+++ b/InferenceSystem/config/Production/FastAI_LiveHLS_MastCenter.yml
@@ -0,0 +1,10 @@
+model_type: "FastAI"
+model_local_threshold: 0.5
+model_global_threshold: 3
+model_path: "./model"
+model_name: "model.pkl"
+hls_stream_type: "LiveHLS"
+hls_polling_interval: 60
+hls_hydrophone_id: "rpi_mast_center"
+upload_to_azure: True
+delete_local_wavs: True
diff --git a/InferenceSystem/demos/hls_reader.py b/InferenceSystem/demos/hls_reader.py
index 47a0fa64..339d9a47 100644
--- a/InferenceSystem/demos/hls_reader.py
+++ b/InferenceSystem/demos/hls_reader.py
@@ -17,10 +17,14 @@
 import shutil
 import spectrogram_visualizer
 
+# TODO: get the list from https://live.orcasound.net/api/json/feeds
 ORCASOUND_STREAMS = {
-    # 'OrcasoundLab': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab'
-    'BushPoint': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point'
-    # 'PortTownsend': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_port_townsend'
+    'BushPoint': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point',
+    'MaSTCenter': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_mast_center',
+    'NorthSanJuanChannel': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_north_sjc',
+    'OrcasoundLab': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab',
+    'PointRobinson': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_point_robinson',
+    'PortTownsend': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_port_townsend',
     'SunsetBay': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_sunset_bay'
 }
 
@@ -134,4 +138,4 @@ def download_hls_segment_and_predict(counter, stream_url, stream_name, wav_durat
     shutil.copy(wav_file_path, wav_player_path)
     spectrogram_visualizer.write_annotations_on_spectrogram(wav_player_path, clipname, result_json, spec_player_path)
 
-    return counter+1
\ No newline at end of file
+    return counter+1
diff --git a/InferenceSystem/deploy/mast-center.yaml b/InferenceSystem/deploy/mast-center.yaml
new file mode 100644
index 00000000..fb39fda8
--- /dev/null
+++ b/InferenceSystem/deploy/mast-center.yaml
@@ -0,0 +1,38 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inference-system
+  namespace: mast-center
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: inference-system
+  template:
+    metadata:
+      labels:
+        app: inference-system
+    spec:
+      containers:
+      - name: inference-system
+        image: orcaconservancycr.azurecr.io/live-inference-system:11-15-20.FastAI.R1-12.MaSTCenter.v0
+        resources:
+          limits:
+            cpu: 1
+            memory: 3G
+        env:
+        - name: AZURE_COSMOSDB_PRIMARY_KEY
+          valueFrom:
+            secretKeyRef:
+              name: inference-system
+              key: AZURE_COSMOSDB_PRIMARY_KEY
+        - name: AZURE_STORAGE_CONNECTION_STRING
+          valueFrom:
+            secretKeyRef:
+              name: inference-system
+              key: AZURE_STORAGE_CONNECTION_STRING
+        - name: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
+          valueFrom:
+            secretKeyRef:
+              name: inference-system
+              key: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
diff --git a/InferenceSystem/deploy/north-sjc.yaml b/InferenceSystem/deploy/north-sjc.yaml
new file mode 100644
index 00000000..01b7ebb5
--- /dev/null
+++ b/InferenceSystem/deploy/north-sjc.yaml
@@ -0,0 +1,38 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inference-system
+  namespace: north-sjc
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: inference-system
+  template:
+    metadata:
+      labels:
+        app: inference-system
+    spec:
+      containers:
+      - name: inference-system
+        image: orcaconservancycr.azurecr.io/live-inference-system:11-15-20.FastAI.R1-12.NorthSJC.v0
+        resources:
+          limits:
+            cpu: 1
+            memory: 3G
+        env:
+        - name: AZURE_COSMOSDB_PRIMARY_KEY
+          valueFrom:
+            secretKeyRef:
+              name: inference-system
+              key: AZURE_COSMOSDB_PRIMARY_KEY
+        - name: AZURE_STORAGE_CONNECTION_STRING
+          valueFrom:
+            secretKeyRef:
+              name: inference-system
+              key: AZURE_STORAGE_CONNECTION_STRING
+        - name: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
+          valueFrom:
+            secretKeyRef:
+              name: inference-system
+              key: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
diff --git a/InferenceSystem/src/LiveInferenceOrchestrator.py b/InferenceSystem/src/LiveInferenceOrchestrator.py
index 1d22f94d..6c9e7bf1 100644
--- a/InferenceSystem/src/LiveInferenceOrchestrator.py
+++ b/InferenceSystem/src/LiveInferenceOrchestrator.py
@@ -33,13 +33,15 @@
 COSMOSDB_DATABASE_NAME = "predictions"
 COSMOSDB_CONTAINER_NAME = "metadata"
 
-ORCASOUND_LAB_LOCATION = {"id": "rpi_orcasound_lab", "name": "Haro Strait", "longitude": -123.17357, "latitude": 48.55833}
-PORT_TOWNSEND_LOCATION = {"id": "rpi_port_townsend", "name": "Port Townsend", "longitude": -122.76045, "latitude": 48.13569}
-BUSH_POINT_LOCATION = {"id": "rpi_bush_point", "name": "Bush Point", "longitude": -122.6039, "latitude": 48.03371}
-SUNSET_BAY_LOCATION = {"id": "rpi_sunset_bay", "name": "Sunset Bay", "longitude": -122.3339, "latitude": 47.86497}
-POINT_ROBINSON_LOCATION = {"id": "rpi_point_robinson", "name": "Point Robinson", "longitude": -122.37267, "latitude": 47.38838}
-
-source_guid_to_location = {"rpi_orcasound_lab" : ORCASOUND_LAB_LOCATION, "rpi_port_townsend" : PORT_TOWNSEND_LOCATION, "rpi_bush_point": BUSH_POINT_LOCATION, "rpi_sunset_bay": SUNSET_BAY_LOCATION, "rpi_point_robinson": POINT_ROBINSON_LOCATION }
+# TODO: get this data from https://live.orcasound.net/api/json/feeds
+BUSH_POINT_LOCATION = {"id": "rpi_bush_point", "name": "Bush Point", "longitude": -122.6040035, "latitude": 48.0336664}
+NORTH_SAN_JUAN_CHANNEL_LOCATION = {"id": "rpi_north_sjc", "name": "North San Juan Channel", "longitude": -123.058779, "latitude": 48.591294}
+ORCASOUND_LAB_LOCATION = {"id": "rpi_orcasound_lab", "name": "Orcasound Lab", "longitude": -123.1735774, "latitude": 48.5583362}
+POINT_ROBINSON_LOCATION = {"id": "rpi_point_robinson", "name": "Point Robinson", "longitude": -122.37267, "latitude": 47.388383}
+PORT_TOWNSEND_LOCATION = {"id": "rpi_port_townsend", "name": "Port Townsend", "longitude": -122.760614, "latitude": 48.135743}
+SUNSET_BAY_LOCATION = {"id": "rpi_sunset_bay", "name": "Sunset Bay", "longitude": -122.33393605795372, "latitude": 47.86497296593844}
+
+source_guid_to_location = {"rpi_bush_point": BUSH_POINT_LOCATION, "rpi_north_sjc": NORTH_SAN_JUAN_CHANNEL_LOCATION, "rpi_orcasound_lab": ORCASOUND_LAB_LOCATION, "rpi_point_robinson": POINT_ROBINSON_LOCATION, "rpi_port_townsend": PORT_TOWNSEND_LOCATION, "rpi_sunset_bay": SUNSET_BAY_LOCATION}
 
 
 def assemble_blob_uri(container_name, item_name):
diff --git a/InferenceSystem/src/PrepareDataForPredictionExplorer.py b/InferenceSystem/src/PrepareDataForPredictionExplorer.py
index d8d8ff20..6ecff216 100644
--- a/InferenceSystem/src/PrepareDataForPredictionExplorer.py
+++ b/InferenceSystem/src/PrepareDataForPredictionExplorer.py
@@ -203,8 +203,9 @@ def main():
         help="Start time in PST in following format 2020-07-25 19:15")
     parser.add_argument("--end_time", type=str, required=True, \
         help="End time in PST in following format 2020-07-25 20:15")
+    # TODO: get list of streams from https://live.orcasound.net/api/json/feeds instead of hard coding it
     parser.add_argument("--s3_stream", type=str, required=True, \
-        help="Hydrophone stream (orcasound_lab/port_townsend/bush_point)")
+        help="Hydrophone stream (bush_point/mast_center/north_sjc/orcasound_lab/point_robinson/port_townsend/sunset_bay)")
     parser.add_argument("--model_path", type=str, required=True, \
         help="Path to the model folder that contains weights and mean, invstd")
     parser.add_argument("--annotation_threshold", type=float, required=True, \
diff --git a/InferenceSystem/src/globals.py b/InferenceSystem/src/globals.py
index dbd89a2c..87d9c402 100644
--- a/InferenceSystem/src/globals.py
+++ b/InferenceSystem/src/globals.py
@@ -1,11 +1,13 @@
 import datetime
 
 S3_STREAM_URLS = {
+    "bush_point": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point",
+    "mast_center": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_mast_center",
+    "north_sjc": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_north_sjc",
     "orcasound_lab": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab",
+    "point_robinson": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_point_robinson",
     "port_townsend": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_port_townsend",
-    "bush_point": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point",
     "sunset_bay": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_sunset_bay",
-    "point_robinson": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_point_robinson",
 }
 
 # Limits time window (end - start) of negative samples to be downloaded for retraining
diff --git a/InferenceSystem/src/model/datautils.py b/InferenceSystem/src/model/datautils.py
index 8bfef95c..fb1f7d1f 100644
--- a/InferenceSystem/src/model/datautils.py
+++ b/InferenceSystem/src/model/datautils.py
@@ -173,6 +173,7 @@ def download_hls_segment(stream_urls,tmp_root,output_root):
 
 
 def test_hls_download():
+    # These URLs are only two of a longer list.
     make_osl_url = lambda x: "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab/hls/{}/live.m3u8".format(x)
     make_bush_url = lambda x: "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point/hls/{}/live.m3u8".format(x)
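
Note on the TODOs added by this patch: all three point at https://live.orcasound.net/api/json/feeds as the eventual replacement for the hard-coded node lists. Below is a minimal sketch of what that lookup might look like; the endpoint URL comes from the TODOs themselves, but the response field names ("data", "attributes", "node_name", "bucket") are assumptions about the feeds API, and fetch_stream_urls is a hypothetical helper, not code from this patch.

# Hypothetical sketch (not part of this patch): build the node-to-URL map from
# the public feeds API instead of hard coding it.  The field names used below
# ("data", "attributes", "node_name", "bucket") are assumed and should be
# verified against the live API before use.
import requests

FEEDS_URL = "https://live.orcasound.net/api/json/feeds"

def fetch_stream_urls():
    """Return a dict mapping node id (e.g. 'rpi_bush_point') to its S3 stream base URL."""
    response = requests.get(FEEDS_URL, timeout=10)
    response.raise_for_status()
    urls = {}
    for feed in response.json().get("data", []):
        attrs = feed.get("attributes", {})
        node_id = attrs.get("node_name")                         # assumed field name
        bucket = attrs.get("bucket", "streaming-orcasound-net")  # assumed field name
        if node_id:
            urls[node_id] = "https://s3-us-west-2.amazonaws.com/{}/{}".format(bucket, node_id)
    return urls

If the response shape holds, ORCASOUND_STREAMS, S3_STREAM_URLS, and source_guid_to_location could all be derived from a single call like this instead of being maintained by hand in three places.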