Add missing nodes to various places in InferenceSystem
Signed-off-by: Dave Thaler <dthaler1968@gmail.com>
dthaler committed Jul 22, 2024
1 parent 712c693 commit 982d284
Showing 9 changed files with 110 additions and 14 deletions.
2 changes: 1 addition & 1 deletion InferenceSystem/README.md
@@ -176,7 +176,7 @@ docker build . -t live-inference-system -f ./Dockerfile

Note: the config used in the Dockerfile is a Production config.

TODO: fix. For now, you will have to manually create 5 different docker containers for the 5 hydrophone locations. Each time you will need to edit the Dockerfile and replace the config for each hydrophone location (OrcasoundLab, BushPoint, PortTownsend, Sunset Bay and Point Robinson).
TODO: fix. For now, you will have to manually create a separate docker container for each hydrophone location. Each time, you will need to edit the Dockerfile to point at that location's config.


## Running the docker container
10 changes: 10 additions & 0 deletions InferenceSystem/config/Production/FastAI_LiveHLS_MastCenter.yml
@@ -0,0 +1,10 @@
model_type: "FastAI"
model_local_threshold: 0.5
model_global_threshold: 3
model_path: "./model"
model_name: "model.pkl"
hls_stream_type: "LiveHLS"
hls_polling_interval: 60
hls_hydrophone_id: "rpi_mast_center"
upload_to_azure: True
delete_local_wavs: True
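
The new MaSTCenter config mirrors the shape of the existing Production configs. As an illustration only (not part of this commit), a config of this shape could be loaded and sanity-checked in Python roughly as follows; the loader function is hypothetical, and the key names come from the file above:

```python
# Minimal sketch: load a Production config YAML and verify the expected keys.
# Assumes PyYAML is available; the function name is hypothetical.
import yaml

REQUIRED_KEYS = {
    "model_type", "model_local_threshold", "model_global_threshold",
    "model_path", "model_name", "hls_stream_type", "hls_polling_interval",
    "hls_hydrophone_id", "upload_to_azure", "delete_local_wavs",
}

def load_inference_config(path):
    """Load a config YAML and fail early if any expected key is missing."""
    with open(path) as f:
        config = yaml.safe_load(f)
    missing = REQUIRED_KEYS - config.keys()
    if missing:
        raise ValueError(f"Config {path} is missing keys: {sorted(missing)}")
    return config

# Example (hypothetical usage):
# config = load_inference_config("config/Production/FastAI_LiveHLS_MastCenter.yml")
```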
10 changes: 7 additions & 3 deletions InferenceSystem/demos/hls_reader.py
@@ -17,10 +17,14 @@
import shutil
import spectrogram_visualizer

# TODO: get the list from https://live.orcasound.net/api/json/feeds
ORCASOUND_STREAMS = {
    # 'OrcasoundLab': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab'
    'BushPoint': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point',
    # 'PortTownsend': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_port_townsend'
    'MaSTCenter': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_mast_center',
    'NorthSanJuanChannel': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_north_sjc',
    'OrcasoundLab': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab',
    'PointRobinson': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_point_robinson',
    'PortTownsend': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_port_townsend',
    'SunsetBay': 'https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_sunset_bay'
}
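
Each value above is the base URL of an S3 HLS stream; the playlist for the current folder follows the `{base}/hls/{timestamp}/live.m3u8` pattern also used in `datautils.py`. A minimal sketch of resolving that playlist (not part of this commit; the `latest.txt` lookup is an assumption about the bucket layout to verify):

```python
import requests

def latest_playlist_url(stream_base_url):
    """Return the live.m3u8 URL for the most recent HLS folder of a stream.

    Assumes each stream bucket publishes a 'latest.txt' whose body is the unix
    timestamp of the current folder; confirm against the actual bucket layout.
    """
    timestamp = requests.get(f"{stream_base_url}/latest.txt", timeout=30).text.strip()
    return f"{stream_base_url}/hls/{timestamp}/live.m3u8"

# e.g. latest_playlist_url(ORCASOUND_STREAMS['MaSTCenter'])
```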

@@ -134,4 +138,4 @@ def download_hls_segment_and_predict(counter, stream_url, stream_name, wav_durat
shutil.copy(wav_file_path, wav_player_path)

spectrogram_visualizer.write_annotations_on_spectrogram(wav_player_path, clipname, result_json, spec_player_path)
return counter+1
return counter+1
38 changes: 38 additions & 0 deletions InferenceSystem/deploy/mast-center.yaml
@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: inference-system
  namespace: mast-center
spec:
  replicas: 1
  selector:
    matchLabels:
      app: inference-system
  template:
    metadata:
      labels:
        app: inference-system
    spec:
      containers:
        - name: inference-system
          image: orcaconservancycr.azurecr.io/live-inference-system:11-15-20.FastAI.R1-12.MaSTCenter.v0
          resources:
            limits:
              cpu: 1
              memory: 3G
          env:
            - name: AZURE_COSMOSDB_PRIMARY_KEY
              valueFrom:
                secretKeyRef:
                  name: inference-system
                  key: AZURE_COSMOSDB_PRIMARY_KEY
            - name: AZURE_STORAGE_CONNECTION_STRING
              valueFrom:
                secretKeyRef:
                  name: inference-system
                  key: AZURE_STORAGE_CONNECTION_STRING
            - name: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
              valueFrom:
                secretKeyRef:
                  name: inference-system
                  key: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
38 changes: 38 additions & 0 deletions InferenceSystem/deploy/north-sjc.yaml
@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: inference-system
  namespace: north-sjc
spec:
  replicas: 1
  selector:
    matchLabels:
      app: inference-system
  template:
    metadata:
      labels:
        app: inference-system
    spec:
      containers:
        - name: inference-system
          image: orcaconservancycr.azurecr.io/live-inference-system:11-15-20.FastAI.R1-12.NorthSJC.v0
          resources:
            limits:
              cpu: 1
              memory: 3G
          env:
            - name: AZURE_COSMOSDB_PRIMARY_KEY
              valueFrom:
                secretKeyRef:
                  name: inference-system
                  key: AZURE_COSMOSDB_PRIMARY_KEY
            - name: AZURE_STORAGE_CONNECTION_STRING
              valueFrom:
                secretKeyRef:
                  name: inference-system
                  key: AZURE_STORAGE_CONNECTION_STRING
            - name: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
              valueFrom:
                secretKeyRef:
                  name: inference-system
                  key: INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING
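
Both new Deployments inject the same three secrets as environment variables. As a hedged sketch (not part of this commit), the containerized inference process could fail fast when any of them is missing; the helper below is hypothetical:

```python
import os
import sys

# The three environment variables the Deployments above inject from secrets.
REQUIRED_ENV_VARS = [
    "AZURE_COSMOSDB_PRIMARY_KEY",
    "AZURE_STORAGE_CONNECTION_STRING",
    "INFERENCESYSTEM_APPINSIGHTS_CONNECTION_STRING",
]

def check_required_env():
    """Exit with a clear message if any expected secret was not injected."""
    missing = [name for name in REQUIRED_ENV_VARS if not os.environ.get(name)]
    if missing:
        sys.exit(f"Missing required environment variables: {', '.join(missing)}")

# check_required_env() would run before the inference loop starts.
```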
16 changes: 9 additions & 7 deletions InferenceSystem/src/LiveInferenceOrchestrator.py
@@ -33,13 +33,15 @@
COSMOSDB_DATABASE_NAME = "predictions"
COSMOSDB_CONTAINER_NAME = "metadata"

ORCASOUND_LAB_LOCATION = {"id": "rpi_orcasound_lab", "name": "Haro Strait", "longitude": -123.17357, "latitude": 48.55833}
PORT_TOWNSEND_LOCATION = {"id": "rpi_port_townsend", "name": "Port Townsend", "longitude": -122.76045, "latitude": 48.13569}
BUSH_POINT_LOCATION = {"id": "rpi_bush_point", "name": "Bush Point", "longitude": -122.6039, "latitude": 48.03371}
SUNSET_BAY_LOCATION = {"id": "rpi_sunset_bay", "name": "Sunset Bay", "longitude": -122.3339, "latitude": 47.86497}
POINT_ROBINSON_LOCATION = {"id": "rpi_point_robinson", "name": "Point Robinson", "longitude": -122.37267, "latitude": 47.38838}

source_guid_to_location = {"rpi_orcasound_lab" : ORCASOUND_LAB_LOCATION, "rpi_port_townsend" : PORT_TOWNSEND_LOCATION, "rpi_bush_point": BUSH_POINT_LOCATION, "rpi_sunset_bay": SUNSET_BAY_LOCATION, "rpi_point_robinson": POINT_ROBINSON_LOCATION }
# TODO: get this data from https://live.orcasound.net/api/json/feeds
BUSH_POINT_LOCATION = {"id": "rpi_bush_point", "name": "Bush Point", "longitude": -122.6040035, "latitude": 48.0336664}
NORTH_SAN_JUAN_CHANNEL_LOCATION = {"id": "rpi_north_sjc", "name": "North San Juan Channel", "longitude": -123.058779, "latitude": 48.591294}
ORCASOUND_LAB_LOCATION = {"id": "rpi_orcasound_lab", "name": "Orcasound Lab", "longitude": -123.1735774, "latitude": 48.5583362}
POINT_ROBINSON_LOCATION = {"id": "rpi_point_robinson", "name": "Point Robinson", "longitude": -122.37267, "latitude": 47.388383}
PORT_TOWNSEND_LOCATION = {"id": "rpi_port_townsend", "name": "Port Townsend", "longitude": -122.760614, "latitude": 48.135743}
SUNSET_BAY_LOCATION = {"id": "rpi_sunset_bay", "name": "Sunset Bay", "longitude": -122.33393605795372, "latitude": 47.86497296593844}

source_guid_to_location = {"rpi_bush_point": BUSH_POINT_LOCATION, "rpi_north_sjc": NORTH_SAN_JUAN_CHANNEL_LOCATION, "rpi_orcasound_lab" : ORCASOUND_LAB_LOCATION, "rpi_point_robinson": POINT_ROBINSON_LOCATION, "rpi_port_townsend" : PORT_TOWNSEND_LOCATION, "rpi_sunset_bay": SUNSET_BAY_LOCATION}

def assemble_blob_uri(container_name, item_name):

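The TODO added above notes that this hard-coded location table should eventually come from the feeds API. A rough sketch of that replacement (not part of this commit; the response attribute names are assumptions to confirm against the live API):

```python
import requests

FEEDS_API_URL = "https://live.orcasound.net/api/json/feeds"

def fetch_source_locations():
    """Build source_guid_to_location from the feeds API instead of hard-coding it.

    The attribute names ('node_name', 'name', 'lat_lng') are assumptions about
    the API response shape; verify them before replacing the constants above.
    """
    response = requests.get(FEEDS_API_URL, timeout=30)
    response.raise_for_status()
    locations = {}
    for feed in response.json().get("data", []):
        attrs = feed["attributes"]
        locations[attrs["node_name"]] = {
            "id": attrs["node_name"],
            "name": attrs["name"],
            "longitude": attrs["lat_lng"]["lng"],
            "latitude": attrs["lat_lng"]["lat"],
        }
    return locations
```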
3 changes: 2 additions & 1 deletion InferenceSystem/src/PrepareDataForPredictionExplorer.py
@@ -203,8 +203,9 @@ def main():
help="Start time in PST in following format 2020-07-25 19:15")
parser.add_argument("--end_time", type=str, required=True, \
help="End time in PST in following format 2020-07-25 20:15")
# TODO: get list of streams from https://live.orcasound.net/api/json/feeds instead of hard coding it
parser.add_argument("--s3_stream", type=str, required=True, \
help="Hydrophone stream (orcasound_lab/port_townsend/bush_point)")
help="Hydrophone stream (bush_point/mast_center/north_sjc/orcasound_lab/point_robinson/port_townsend/sunset_bay)")
parser.add_argument("--model_path", type=str, required=True, \
help="Path to the model folder that contains weights and mean, invstd")
parser.add_argument("--annotation_threshold", type=float, required=True, \
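The new TODO suggests not hard-coding the stream list here. One hedged option (not part of this commit) is to derive the accepted `--s3_stream` values from `S3_STREAM_URLS` in `globals.py`, so the help text stays in sync with the streams that are actually defined:

```python
# Sketch only: restrict --s3_stream to the streams defined in globals.py.
# Assumes this script can import globals from InferenceSystem/src.
import argparse

from globals import S3_STREAM_URLS

parser = argparse.ArgumentParser()
parser.add_argument(
    "--s3_stream",
    type=str,
    required=True,
    choices=sorted(S3_STREAM_URLS),
    help="Hydrophone stream (one of: " + ", ".join(sorted(S3_STREAM_URLS)) + ")",
)
```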
6 changes: 4 additions & 2 deletions InferenceSystem/src/globals.py
@@ -1,11 +1,13 @@
import datetime

S3_STREAM_URLS = {
    "bush_point": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point",
    "mast_center": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_mast_center",
    "north_sjc": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_north_sjc",
    "orcasound_lab": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab",
    "point_robinson": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_point_robinson",
    "port_townsend": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_port_townsend",
    "bush_point": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point",
    "sunset_bay": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_sunset_bay",
    "point_robinson": "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_point_robinson",
}

# Limits time window (end - start) of negative samples to be downloaded for retraining
1 change: 1 addition & 0 deletions InferenceSystem/src/model/datautils.py
@@ -173,6 +173,7 @@ def download_hls_segment(stream_urls,tmp_root,output_root):


def test_hls_download():
    # These URLs are only two of a longer list.
    make_osl_url = lambda x: "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_orcasound_lab/hls/{}/live.m3u8".format(x)

    make_bush_url = lambda x: "https://s3-us-west-2.amazonaws.com/streaming-orcasound-net/rpi_bush_point/hls/{}/live.m3u8".format(x)
