# audiomark.py
""" A very rough modification of https://github.com/CrowdCurio/audio-annotator."""
import json
import glob
import mimetypes
import os
import numpy as np
import pandas as pd
from flask import Flask, Response, request, send_file
from flask import session, redirect, url_for, jsonify, render_template
app = Flask(__name__)
@app.route('/')
def index():
    return render_template('index.html')


@app.route('/api/next', methods=['GET'])
def next_task():
    meta = pd.read_csv('/volatile/dcase17_1/meta.txt', sep='\t',
                       names=['file', 'scene', 'recording'])
    clues = pd.read_csv('/volatile/dcase17_1/clues.txt', sep='\t',
                        names=['file', 'start', 'end', 'label'])
    # Shuffle with a fixed seed, then keep only training files that have no
    # annotations in clues.txt yet.
    meta = meta.sample(frac=1, random_state=20170713).reset_index(drop=True)
    completed = pd.unique(clues['file'])
    meta = meta[~meta['file'].isin(completed)]
    meta = meta[meta['scene'] == 'train'].reset_index(drop=True)
    print('Rows: ', len(meta))
    # idx = np.random.randint(len(meta))
    idx = 0
    # .iloc replaces the long-deprecated (and since removed) DataFrame.ix.
    audio_file = meta.iloc[idx, 0].replace('audio/', '')
    audio_scene = meta.iloc[idx, 1]
    audio_prediction = meta.iloc[idx, 2]
    # audio_file = os.path.basename(np.random.choice(glob.glob('data/audio/*')))
    # audio_scene = meta[meta['file'] == 'audio/' + audio_file].scene.values[0]
    task = dict(feedback="none",
                visualization='spectrogram',
                proximityTag=[],
                annotationTag=['dishes', 'page_flip'],
                url="/static/data/audio/" + audio_file,
                numRecordings='?',
                file=audio_file,
                info='<strong>{}</strong> (pred: {})<br />{}'.format(audio_scene, audio_prediction, audio_file),  # noqa
                tutorialVideoURL="https://www.youtube.com/embed/Bg8-83heFRM",
                alwaysShowTags=True)
    data = json.dumps(dict(task=task))
    # app.logger.debug("Returning:\n{}".format(data))
    resp = Response(data)
    return resp


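# For reference, /api/submit expects a JSON body shaped roughly like this
# (field names match the handler below; the values are purely illustrative):
#
#     {"file": "example.wav",
#      "annotations": [{"start": 1.25, "end": 2.50, "annotation": "dishes"}]}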
@app.route('/api/submit', methods=['POST'])
def save_annotation():
    if request.headers['Content-Type'] == 'application/json':
        # app.logger.info("Received Annotation:\n{}".format(json.dumps(request.json, indent=2)))
        clues = pd.read_csv('/volatile/dcase17_1/clues.txt', sep='\t',
                            names=['file', 'start', 'end', 'label'])
        file = 'audio/' + request.json['file']
        for annotation in request.json['annotations']:
            start = np.round(annotation['start'], 2)
            end = np.round(annotation['end'], 2)
            label = annotation['annotation']
            # app.logger.info('{},{},{},{}'.format(file, start, end, label))
            row = pd.DataFrame(columns=('file', 'start', 'end', 'label'))
            row.loc[0] = (file, start, end, label)
            # print(row)
            # DataFrame.append was removed in pandas 2.0; concat is equivalent here.
            clues = pd.concat([clues, row], ignore_index=True)
        if not len(request.json['annotations']):
            # Record an explicit "none" entry so the file counts as completed.
            row = pd.DataFrame(columns=('file', 'start', 'end', 'label'))
            row.loc[0] = (file, 0.0, 0.0, 'none')
            clues = pd.concat([clues, row], ignore_index=True)
        print(clues.groupby(['label']).aggregate('count'))
        clues.to_csv('/volatile/dcase17_1/clues.txt', sep='\t',
                     header=False, index=False)
    data = json.dumps(dict(message='Success!'))
    status = 200
    resp = Response(data, status=status, mimetype=mimetypes.types_map[".json"])
    return resp
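

# Assumed local entry point; the debug setting below is a guess for convenient
# local testing, not taken from the original project configuration.
if __name__ == '__main__':
    app.run(debug=True)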