Make sure the AudioContext is created only after user interaction

See https://goo.gl/7K7WLu
This commit is contained in:
Jonas Herzig 2018-10-07 23:28:37 +02:00
parent bf731c7812
commit dd0c7f32a7
4 changed files with 13 additions and 7 deletions

View file

@@ -262,7 +262,7 @@ class Settings {
 class GlobalBindings {
   constructor () {
     this.settings = new Settings()
-    this.connector = new WorkerBasedMumbleConnector(audioContext.sampleRate)
+    this.connector = new WorkerBasedMumbleConnector()
     this.client = null
     this.userContextMenu = new ContextMenu()
     this.channelContextMenu = new ContextMenu()
@@ -334,6 +334,10 @@ class GlobalBindings {
       log('Connecting to server ', host)
+      // Note: This call needs to be delayed until the user has interacted with
+      // the page in some way (which at this point they have), see: https://goo.gl/7K7WLu
+      this.connector.setSampleRate(audioContext().sampleRate)
+
       // TODO: token
       this.connector.connect(`wss://${host}:${port}`, {
         username: username,
@@ -555,9 +559,9 @@ class GlobalBindings {
       }).on('voice', stream => {
         console.log(`User ${user.username} started takling`)
         var userNode = new BufferQueueNode({
-          audioContext: audioContext
+          audioContext: audioContext()
         })
-        userNode.connect(audioContext.destination)
+        userNode.connect(audioContext().destination)
         stream.on('data', data => {
           if (data.target === 'normal') {

View file

@@ -103,7 +103,7 @@ export class VADVoiceHandler extends VoiceHandler {
     super(client, settings)
     let level = settings.vadLevel
     const self = this
-    this._vad = vad(audioContext, theUserMedia, {
+    this._vad = vad(audioContext(), theUserMedia, {
       onVoiceStart () {
         console.log('vad: start')
         self._active = true

View file

@@ -12,7 +12,7 @@ import worker from './worker'
  * Only stuff which we need in mumble-web is proxied, i.e. this is not a generic solution.
  */
 class WorkerBasedMumbleConnector {
-  constructor (sampleRate) {
+  constructor () {
     this._worker = webworkify(worker)
     this._worker.addEventListener('message', this._onMessage.bind(this))
     this._reqId = 1
@@ -20,7 +20,9 @@ class WorkerBasedMumbleConnector {
     this._clients = {}
     this._nextVoiceId = 1
     this._voiceStreams = {}
+  }
+
+  setSampleRate (sampleRate) {
     this._postMessage({
       method: '_init',
       sampleRate: sampleRate

View file

@@ -16,7 +16,7 @@
   ],
   "devDependencies": {
     "audio-buffer-utils": "^3.1.2",
-    "audio-context": "^0.1.0",
+    "audio-context": "^1.0.3",
     "babel-core": "^6.18.2",
     "babel-loader": "^6.2.8",
     "babel-plugin-transform-runtime": "^6.15.0",
@@ -52,6 +52,6 @@
     "mumble-client-codecs-browser": "^1.2.0",
     "mumble-client-websocket": "^1.0.0",
     "mumble-client": "^1.3.0",
-    "web-audio-buffer-queue": "^1.0.0"
+    "web-audio-buffer-queue": "^1.1.0"
   }
 }