Add WebRTC support, drop UDPTunnel support

parent b2031ea552
commit bc83d62339

4 changed files with 51 additions and 77 deletions
app/index.js (50 changed lines)
@@ -1,10 +1,8 @@
 import 'stream-browserify' // see https://github.com/ericgundrum/pouch-websocket-sync-example/commit/2a4437b013092cc7b2cd84cf1499172c84a963a3
-import 'subworkers' // polyfill for https://bugs.chromium.org/p/chromium/issues/detail?id=31666
 import url from 'url'
 import ByteBuffer from 'bytebuffer'
 import MumbleClient from 'mumble-client'
-import WorkerBasedMumbleConnector from './worker-client'
-import BufferQueueNode from 'web-audio-buffer-queue'
+import mumbleConnect from 'mumble-client-websocket'
 import audioContext from 'audio-context'
 import ko from 'knockout'
 import _dompurify from 'dompurify'
@@ -267,7 +265,7 @@ class GlobalBindings {
   constructor (config) {
     this.config = config
     this.settings = new Settings(config.settings)
-    this.connector = new WorkerBasedMumbleConnector()
+    this.connector = { connect: mumbleConnect }
     this.client = null
     this.userContextMenu = new ContextMenu()
     this.channelContextMenu = new ContextMenu()
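
The swap works because both connectors are duck-typed: anything with a connect(address, options) method returning a promise-like value satisfies the call sites in this file. A minimal sketch of that shape, assuming the mumble-client-websocket export behaves as the connect call further down suggests (the server address and credentials are placeholders):

    import mumbleConnect from 'mumble-client-websocket'

    // Same interface the worker-based connector exposed, minus the worker:
    // connect() resolves to a MumbleClient once the handshake completes.
    const connector = { connect: mumbleConnect }

    connector.connect('wss://mumble.example.com:443', { username: 'alice', password: '' })
      .done(client => console.log('connected as', client.self.username))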
@@ -339,14 +337,15 @@ class GlobalBindings {

     log('Connecting to server ', host)

-    // Note: This call needs to be delayed until the user has interacted with
-    // the page in some way (which at this point they have), see: https://goo.gl/7K7WLu
-    this.connector.setSampleRate(audioContext().sampleRate)
-
     // TODO: token
     this.connector.connect(`wss://${host}:${port}`, {
       username: username,
-      password: password
+      password: password,
+      webrtc: {
+        enabled: true,
+        mic: micStream,
+        audioContext: audioContext()
+      }
     }).done(client => {
       log('Connected!')

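
With webrtc.enabled set, mumble-client negotiates the audio path over an RTCPeerConnection instead of tunnelling voice packets through the websocket (the dropped UDPTunnel path), which is why the local mic MediaStream and a shared AudioContext must be supplied before connecting. A sketch of the option object under those assumptions, with placeholder credentials and a hypothetical helper name:

    import audioContext from 'audio-context'

    // micStream must already hold the MediaStream from getUserMedia; WebRTC
    // needs the local tracks at negotiation time, not after the fact.
    function buildConnectOptions (username, password, micStream) {
      return {
        username: username,
        password: password,
        webrtc: {
          enabled: true,                // negotiate audio over a peer connection
          mic: micStream,               // local capture handed to the peer connection
          audioContext: audioContext()  // used for remote playback
        }
      }
    }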
@@ -563,24 +562,18 @@ class GlobalBindings {
         }
       }).on('voice', stream => {
         console.log(`User ${user.username} started takling`)
-        var userNode = new BufferQueueNode({
-          audioContext: audioContext()
-        })
-        userNode.connect(audioContext().destination)
-
+        if (stream.target === 'normal') {
+          ui.talking('on')
+        } else if (stream.target === 'shout') {
+          ui.talking('shout')
+        } else if (stream.target === 'whisper') {
+          ui.talking('whisper')
+        }
         stream.on('data', data => {
-          if (data.target === 'normal') {
-            ui.talking('on')
-          } else if (data.target === 'shout') {
-            ui.talking('shout')
-          } else if (data.target === 'whisper') {
-            ui.talking('whisper')
-          }
-          userNode.write(data.buffer)
+          // mumble-client is in WebRTC mode, no pcm data should arrive this way
         }).on('end', () => {
           console.log(`User ${user.username} stopped takling`)
           ui.talking('off')
-          userNode.end()
         })
       })
     }
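
Since decoded audio now arrives through the peer connection and is played by the browser itself, the per-user BufferQueueNode pipeline would never receive data; the 'voice' stream survives only to drive the talking indicator. A rough reduction of the new handler, assuming the same user and ui objects as the surrounding code:

    // The stream now carries only target metadata; PCM playback happens inside
    // the WebRTC stack, so no Web Audio plumbing is needed per user.
    user.on('voice', stream => {
      const indicator = { normal: 'on', shout: 'shout', whisper: 'whisper' }
      ui.talking(indicator[stream.target] || 'on')
      stream.on('data', () => {
        // intentionally empty: no pcm data should arrive in WebRTC mode
      }).on('end', () => ui.talking('off'))
    })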
@@ -929,7 +922,9 @@ window.onload = function () {
     req.send()
   }
   ui.connectDialog.joinOnly(useJoinDialog)
-  ko.applyBindings(ui)
+  userMediaPromise.then(() => {
+    ko.applyBindings(ui)
+  })
 }

 window.onresize = () => ui.updateSize()
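
Deferring ko.applyBindings until userMediaPromise settles means the connect dialog cannot be used before microphone capture has been resolved one way or the other, so micStream is guaranteed to be populated (or the user alerted) by the time connect() can run. The same gating pattern in isolation, with hypothetical names:

    // Hypothetical reduction of the onload flow: bind the UI only once the
    // microphone question has been answered.
    function bootUi (userMediaPromise, bindUi) {
      return userMediaPromise.then(() => bindUi())
    }

    // usage sketch: bootUi(userMediaPromise, () => ko.applyBindings(ui))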
@@ -981,10 +976,11 @@ function userToState () {
   return flags.join(', ')
 }

+var micStream
 var voiceHandler
 var testVoiceHandler

-initVoice(data => {
+var userMediaPromise = initVoice(data => {
   if (testVoiceHandler) {
     testVoiceHandler.write(data)
   }
@@ -996,6 +992,8 @@ initVoice(data => {
   } else if (voiceHandler) {
     voiceHandler.write(data)
   }
+}).then(userMedia => {
+  micStream = userMedia
 }, err => {
-  log('Cannot initialize user media. Microphone will not work:', err)
+  window.alert('Failed to initialize user media\nRefresh page to retry.\n' + err)
 })
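
Note that because the rejection is handled inline (the second argument to .then), userMediaPromise is fulfilled even when getUserMedia fails, so the userMediaPromise.then(...) in window.onload above still binds the UI after the alert. A self-contained illustration of that promise semantics:

    // A rejection handled by the second .then argument yields a fulfilled
    // promise, so downstream .then callbacks still run.
    const p = Promise.reject(new Error('mic denied'))
      .then(media => media, err => console.log('no mic:', err.message))

    p.then(() => console.log('the UI would still be bound here'))
    // logs "no mic: mic denied", then "the UI would still be bound here"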
app/voice.js (23 changed lines)
@@ -1,7 +1,6 @@
 import { Writable } from 'stream'
 import MicrophoneStream from 'microphone-stream'
 import audioContext from 'audio-context'
-import getUserMedia from 'getusermedia'
 import keyboardjs from 'keyboardjs'
 import vad from 'voice-activity-detection'
 import DropStream from 'drop-stream'
@@ -33,8 +32,7 @@ class VoiceHandler extends Writable {
       return this._outbound
     }

-    // Note: the samplesPerPacket argument is handled in worker.js and not passed on
-    this._outbound = this._client.createVoiceStream(this._settings.samplesPerPacket)
+    this._outbound = this._client.createVoiceStream()

     this.emit('started_talking')
   }
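
With worker.js gone there is nothing left to consume samplesPerPacket, and in WebRTC mode framing is handled downstream of this class, so the argument is dropped. The outbound path keeps its shape: the handler is a Writable that lazily opens a voice stream and forwards chunks to it. A stand-alone sketch of that pattern, assuming a stubbed client with the createVoiceStream method used above:

    import { Writable } from 'stream'

    // Minimal stand-in for the VoiceHandler outbound path.
    class SketchVoiceHandler extends Writable {
      constructor (client) {
        super({ objectMode: true })
        this._client = client
        this._outbound = null
      }

      _write (chunk, encoding, callback) {
        if (!this._outbound) {
          // no samplesPerPacket any more: packetisation is no longer our job
          this._outbound = this._client.createVoiceStream()
        }
        this._outbound.write(chunk)
        callback()
      }
    }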
@@ -160,16 +158,13 @@ export class VADVoiceHandler extends VoiceHandler {

 var theUserMedia = null

-export function initVoice (onData, onUserMediaError) {
-  getUserMedia({ audio: true }, (err, userMedia) => {
-    if (err) {
-      onUserMediaError(err)
-    } else {
-      theUserMedia = userMedia
-      var micStream = new MicrophoneStream(userMedia, { objectMode: true, bufferSize: 1024 })
-      micStream.on('data', data => {
-        onData(Buffer.from(data.getChannelData(0).buffer))
-      })
-    }
+export function initVoice (onData) {
+  return window.navigator.mediaDevices.getUserMedia({ audio: true }).then((userMedia) => {
+    theUserMedia = userMedia
+    var micStream = new MicrophoneStream(userMedia, { objectMode: true, bufferSize: 1024 })
+    micStream.on('data', data => {
+      onData(Buffer.from(data.getChannelData(0).buffer))
+    })
+    return userMedia
   })
 }
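
The old getusermedia package wrapped capture in a callback-plus-error-callback API; navigator.mediaDevices.getUserMedia returns a promise natively, and returning userMedia from the .then is what lets index.js store the MediaStream as micStream. A usage sketch of the new signature:

    // onData receives Buffer views of the first channel's Float32 samples;
    // the returned promise resolves to the raw MediaStream.
    initVoice(pcm => {
      console.log('got', pcm.length, 'bytes of microphone data')
    }).then(userMedia => {
      console.log('audio tracks:', userMedia.getAudioTracks().length)
    }, err => {
      console.error('microphone unavailable:', err)
    })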
(diffs for the remaining two changed files did not load)