-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.js
114 lines (100 loc) · 3.17 KB
/
index.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
// Streaming speech-to-text server: accepts raw audio buffers over
// Socket.IO and answers with Wit.ai transcriptions plus any parsed date.
const fs = require("fs");
const util = require('util');
const { Readable } = require('stream');
const express = require('express');
const app = express();
const http = require('http').createServer(app);
// Socket.IO is mounted under /audio/socket.io so the app can sit behind a
// path-prefix reverse proxy. NOTE(review): CORS origin is wide open ("*") —
// confirm this is intentional before exposing publicly.
const io = require('socket.io')(http, {
path: "/audio/socket.io",
cors: {
origin: "*"
},
});
const cors = require('cors');
app.use(cors())
const port = 8002;
const host = '0.0.0.0'; // or localhost
http.listen(port, host, () => {
console.log('listening on *:' + port);
});
// Static assets (e.g. the debug MP3 written by write_output_mp3) come from ./public.
app.use('/audio/', express.static('public'))
// Fallback 404 for any request the static handler did not match.
app.use(function(req, res) {
console.error(404, req.url)
res.status(404)
res.send('404 not found.')
})
// Last-resort error handler — the 4-argument signature is what makes
// Express treat this middleware as an error handler.
app.use(function(err, req, res, next) {
console.error(err)
res.status(500);
res.send('500 something went wrong')
})
// '/' namespace: clients send {buffer, id} audio chunks on 'stream' and get
// back a 'text' event with the raw transcript and a parsed date (or null).
io.of('/').on('connection', function(socket) {
    socket.on('stream', async function(data) {
        // write_output_mp3(data) // for debugging purposes
        const {buffer, id} = data;
        try {
            const out = await transcribe_witai(buffer);
            // transcribe_witai returns undefined when the API call fails;
            // guard the nlp() call so a failed transcription does not become
            // an unhandled promise rejection inside this async handler.
            socket.emit('text', {raw: out, nlp: out != null ? nlp(out) : null, id});
        } catch (e) {
            console.error('stream handler failed:', e);
        }
    });
});
// Default-namespace lifecycle hook. Both handlers are intentional no-ops;
// the logging they once did was disabled after debugging.
io.on('connection', function (socket) {
    socket.on('disconnect', function () {
        // intentionally empty
    });
});
const moment = require('moment');
const lame = require("@suldashi/lame");
const path = require('path');
/**
 * Debug helper: encode a raw PCM buffer to MP3 and write it to
 * public/sample_pcm.mp3 (served via the /audio/ static route).
 *
 * The original returned before the pipeline finished (nothing was awaited)
 * and had no error handling; this version resolves only once the file is
 * fully written and rejects on write-stream errors.
 *
 * @param {Buffer} buffer - raw 16-bit mono 44.1 kHz PCM samples
 * @returns {Promise<void>} resolves when the MP3 has been flushed to disk
 */
async function write_output_mp3(buffer) {
    const encoder = new lame.Encoder({
        // input
        channels: 1, // 2 channels (left and right)
        bitDepth: 16, // 16-bit samples
        sampleRate: 44100, // 44,100 Hz sample rate
        // output
        mode: lame.MONO // STEREO (default), JOINTSTEREO, DUALCHANNEL or MONO
    });
    await new Promise((resolve, reject) => {
        const sink = fs.createWriteStream(path.resolve(__dirname, 'public/sample_pcm.mp3'));
        sink.on('finish', resolve);
        sink.on('error', reject);
        Readable.from(buffer).pipe(encoder).pipe(sink);
    });
}
const { wordsToNumbers } = require('words-to-numbers');
const chrono = require('chrono-node');
/**
 * Extract a date/time from a speech transcript.
 * Converts number words to digits first ("three pm" -> "3 pm") so chrono
 * can parse them.
 *
 * @param {string|undefined|null} text - transcript from transcribe_witai
 *   (may be undefined when transcription failed)
 * @returns {Date|null} the parsed date, or null when nothing parses or
 *   no text was supplied
 */
function nlp(text){
    // transcribe_witai yields undefined on failure; bail out instead of
    // letting wordsToNumbers throw on a non-string.
    if (text == null) return null;
    // wordsToNumbers returns a plain number when the whole string is a
    // number phrase; chrono expects a string, so coerce explicitly.
    const normalized = String(wordsToNumbers(String(text)));
    return chrono.parseDate(normalized);
}
// WitAI
// Timestamp (ms since epoch) of the most recent Wit.ai request; used by
// transcribe_witai to throttle calls to at most one per second.
let witAI_lastcallTS = null;
// Wit.ai server access token, read from the local (untracked) config file.
const {WITAPIKEY} = require('./config.json');
const witClient = require('node-witai-speech');
/**
 * Pause for the given number of milliseconds.
 * @param {number} ms - delay in milliseconds
 * @returns {Promise<void>} resolves (with undefined) after the delay
 */
function sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
}
/**
 * Transcribe a WAV audio buffer via the Wit.ai speech API.
 * Throttled to at most one request per second across all callers
 * (tracked in the module-level witAI_lastcallTS).
 *
 * @param {Buffer} buffer - WAV-encoded audio data
 * @returns {Promise<string|undefined>} the transcript, or undefined when the
 *   API call fails (errors are logged, not thrown — callers keep the
 *   original best-effort contract)
 */
async function transcribe_witai(buffer) {
    // Ensure we do not send more than one request per second.
    // (Date.now() replaces Math.floor(new Date()) — numerically identical.)
    if (witAI_lastcallTS != null) {
        while (Date.now() - witAI_lastcallTS < 1000) {
            await sleep(100);
        }
    }
    const stream = Readable.from(buffer);
    try {
        const extractSpeechIntent = util.promisify(witClient.extractSpeechIntent);
        const contenttype = "audio/wav"
        const output = await extractSpeechIntent(WITAPIKEY, stream, contenttype)
        witAI_lastcallTS = Date.now();
        // Field name differs between wit.ai API versions.
        if (output && '_text' in output)
            return output._text
        if (output && 'text' in output)
            return output.text
        return output;
    } catch (e) {
        console.log('transcribe_witai 851:' + e); console.log(e)
    } finally {
        // Always release the stream — the original leaked it when
        // extractSpeechIntent rejected (destroy was only on the success path).
        stream.destroy()
    }
}