Add WebRTC support, drop UDPTunnel support

Jonas Herzig 2018-12-02 18:32:53 +01:00
parent b2031ea552
commit bc83d62339
4 changed files with 51 additions and 77 deletions

README.md

@@ -1,15 +1,16 @@
# mumble-web
Note: This WebRTC branch is not backwards compatible with the current release, i.e. it expects the server/proxy to support WebRTC, which neither websockify nor Grumble does. It also requires an extension to the Mumble protocol which has not yet been stabilized and may therefore change at any time, so make sure to keep mumble-web and mumble-web-proxy in sync.
mumble-web is an HTML5 [Mumble] client for use in modern browsers.
A live demo is running [here](https://voice.johni0702.de/?address=voice.johni0702.de&port=443/demo).
A live demo is running [here](https://voice.johni0702.de/webrtc/?address=voice.johni0702.de&port=443/demo).
The Mumble protocol uses TCP for control and UDP for voice.
Running in a browser, this client can use neither.
Instead Websockets are used for all communications.
Instead Websockets are used for control and WebRTC is used for voice.
libopus, libcelt (0.7.1) and libsamplerate, compiled to JS via emscripten, are used for audio decoding.
Therefore, at the moment only the Opus and CELT Alpha codecs are supported.
Therefore, only the Opus codec is supported.
Quite a few features, most notably all
administrative functionality, are still missing.
@@ -18,10 +19,10 @@ administrative functionality, are still missing.
#### Download
mumble-web can either be installed directly from npm with `npm install -g mumble-web`
or from git:
or from git (the webrtc branch is currently only available from git):
```
git clone https://github.com/johni0702/mumble-web
git clone -b webrtc https://github.com/johni0702/mumble-web
cd mumble-web
npm install
npm run build
@@ -34,30 +35,11 @@ Either way you will end up with a `dist` folder that contains the static page.
#### Setup
At the time of writing, there do not seem to be any Mumble servers
which natively support Websockets. To use this client with any standard mumble
server, websockify must be set up (preferably on the same machine that the
which natively support Websockets+WebRTC. To use this client with any standard Mumble
server, [mumble-web-proxy] must be set up (preferably on the same machine that the
Mumble server is running on).
You can install websockify via your package manager `apt install websockify` or
manually from the [websockify GitHub page]. Note that while some versions might
function better than others, the python version generally seems to be the best.
There are two basic ways you can use websockify with mumble-web:
- Standalone, use websockify both for websockets and for serving static files
- Proxied, let your favorite web server serve static files and proxy websocket connections to websockify
##### Standalone
This is the simplest but at the same time least flexible configuration.
```
websockify --cert=mycert.crt --key=mykey.key --ssl-only --ssl-target --web=path/to/dist 443 mumbleserver:64738
```
##### Proxied
This configuration allows you to run websockify on a machine that already has
another webserver running.
```
websockify --ssl-target 64737 mumbleserver:64738
```
Additionally, you will need some web server to serve the static files and terminate the secure websocket connection (mumble-web-proxy only supports insecure ones).
A sample configuration for nginx that allows access to mumble-web at
`https://voice.example.com/` and connecting at `wss://voice.example.com/demo`
@@ -73,7 +55,7 @@ server {
root /path/to/dist;
}
location /demo {
proxy_pass http://websockify:64737;
proxy_pass http://proxybox:64737;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
@@ -85,6 +67,11 @@ map $http_upgrade $connection_upgrade {
'' close;
}
```
where `proxybox` is the machine running mumble-web-proxy (may be `localhost`):
```
mumble-web-proxy --listen-ws 64737 --server mumbleserver:64738
```
If your mumble-web-proxy is running behind a NAT or firewall, take note of the respective section in its README.
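For example, behind a NAT you would typically pin the ICE candidates to a fixed UDP port range and forward that range (plus the websocket port) to the proxy machine. A rough sketch; the `--ice-port-min`/`--ice-port-max` flag names are an assumption here, so verify them against the proxy's README or `mumble-web-proxy --help`:
```
# Hypothetical example: restrict ICE to UDP 20000-20100 so that range can be
# forwarded through the NAT/firewall, in addition to TCP 64737 for the websocket.
mumble-web-proxy --listen-ws 64737 --server mumbleserver:64738 \
    --ice-port-min 20000 --ice-port-max 20100
```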
### Configuration
The `app/config.js` file contains default values and descriptions for all configuration options.
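As an illustration only, overriding the connection defaults might look roughly like the sketch below; the `address` and `port` names are taken from the demo URL above, and the authoritative option names, structure and descriptions are those in `app/config.js` itself:
```
// Purely illustrative sketch of config defaults; check app/config.js for the
// real option names, structure and documentation.
export default {
  defaults: {
    address: 'voice.example.com', // server/proxy host from the nginx example above
    port: '443/demo'              // port (and path) of the websocket location
  },
  settings: {
    // client settings consumed via `new Settings(config.settings)` in app/index.js
  }
}
```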
@@ -133,6 +120,6 @@ See [here](https://docs.google.com/document/d/1uPF7XWY_dXTKVKV7jZQ2KmsI19wn9-kFR
ISC
[Mumble]: https://wiki.mumble.info/wiki/Main_Page
[websockify GitHub page]: https://github.com/novnc/websockify
[mumble-web-proxy]: https://github.com/johni0702/mumble-web-proxy
[MetroMumble]: https://github.com/xPoke/MetroMumble
[Matrix]: https://matrix.org

app/index.js

@@ -1,10 +1,8 @@
import 'stream-browserify' // see https://github.com/ericgundrum/pouch-websocket-sync-example/commit/2a4437b013092cc7b2cd84cf1499172c84a963a3
import 'subworkers' // polyfill for https://bugs.chromium.org/p/chromium/issues/detail?id=31666
import url from 'url'
import ByteBuffer from 'bytebuffer'
import MumbleClient from 'mumble-client'
import WorkerBasedMumbleConnector from './worker-client'
import BufferQueueNode from 'web-audio-buffer-queue'
import mumbleConnect from 'mumble-client-websocket'
import audioContext from 'audio-context'
import ko from 'knockout'
import _dompurify from 'dompurify'
@@ -267,7 +265,7 @@ class GlobalBindings {
constructor (config) {
this.config = config
this.settings = new Settings(config.settings)
this.connector = new WorkerBasedMumbleConnector()
this.connector = { connect: mumbleConnect }
this.client = null
this.userContextMenu = new ContextMenu()
this.channelContextMenu = new ContextMenu()
@@ -339,14 +337,15 @@ class GlobalBindings {
log('Connecting to server ', host)
// Note: This call needs to be delayed until the user has interacted with
// the page in some way (which at this point they have), see: https://goo.gl/7K7WLu
this.connector.setSampleRate(audioContext().sampleRate)
// TODO: token
this.connector.connect(`wss://${host}:${port}`, {
username: username,
password: password
password: password,
webrtc: {
enabled: true,
mic: micStream,
audioContext: audioContext()
}
}).done(client => {
log('Connected!')
@@ -563,24 +562,18 @@
}
}).on('voice', stream => {
console.log(`User ${user.username} started talking`)
var userNode = new BufferQueueNode({
audioContext: audioContext()
})
userNode.connect(audioContext().destination)
if (stream.target === 'normal') {
ui.talking('on')
} else if (stream.target === 'shout') {
ui.talking('shout')
} else if (stream.target === 'whisper') {
ui.talking('whisper')
}
stream.on('data', data => {
if (data.target === 'normal') {
ui.talking('on')
} else if (data.target === 'shout') {
ui.talking('shout')
} else if (data.target === 'whisper') {
ui.talking('whisper')
}
userNode.write(data.buffer)
// mumble-client is in WebRTC mode, so no PCM data should arrive this way
}).on('end', () => {
console.log(`User ${user.username} stopped talking`)
ui.talking('off')
userNode.end()
})
})
}
@@ -929,7 +922,9 @@ window.onload = function () {
req.send()
}
ui.connectDialog.joinOnly(useJoinDialog)
ko.applyBindings(ui)
userMediaPromise.then(() => {
ko.applyBindings(ui)
})
}
window.onresize = () => ui.updateSize()
@@ -981,10 +976,11 @@ function userToState () {
return flags.join(', ')
}
var micStream
var voiceHandler
var testVoiceHandler
initVoice(data => {
var userMediaPromise = initVoice(data => {
if (testVoiceHandler) {
testVoiceHandler.write(data)
}
@@ -996,6 +992,8 @@ initVoice(data => {
} else if (voiceHandler) {
voiceHandler.write(data)
}
}).then(userMedia => {
micStream = userMedia
}, err => {
log('Cannot initialize user media. Microphone will not work:', err)
window.alert('Failed to initialize user media\nRefresh page to retry.\n' + err)
})

app/voice.js

@@ -1,7 +1,6 @@
import { Writable } from 'stream'
import MicrophoneStream from 'microphone-stream'
import audioContext from 'audio-context'
import getUserMedia from 'getusermedia'
import keyboardjs from 'keyboardjs'
import vad from 'voice-activity-detection'
import DropStream from 'drop-stream'
@@ -33,8 +32,7 @@ class VoiceHandler extends Writable {
return this._outbound
}
// Note: the samplesPerPacket argument is handled in worker.js and not passed on
this._outbound = this._client.createVoiceStream(this._settings.samplesPerPacket)
this._outbound = this._client.createVoiceStream()
this.emit('started_talking')
}
@@ -160,16 +158,13 @@ export class VADVoiceHandler extends VoiceHandler {
var theUserMedia = null
export function initVoice (onData, onUserMediaError) {
getUserMedia({ audio: true }, (err, userMedia) => {
if (err) {
onUserMediaError(err)
} else {
theUserMedia = userMedia
var micStream = new MicrophoneStream(userMedia, { objectMode: true, bufferSize: 1024 })
micStream.on('data', data => {
onData(Buffer.from(data.getChannelData(0).buffer))
})
}
export function initVoice (onData) {
return window.navigator.mediaDevices.getUserMedia({ audio: true }).then((userMedia) => {
theUserMedia = userMedia
var micStream = new MicrophoneStream(userMedia, { objectMode: true, bufferSize: 1024 })
micStream.on('data', data => {
onData(Buffer.from(data.getChannelData(0).buffer))
})
return userMedia
})
}

package.json

@@ -30,7 +30,6 @@
"duplex-maker": "^1.0.0",
"extract-loader": "^0.1.0",
"file-loader": "^0.9.0",
"getusermedia": "^2.0.0",
"html-loader": "^0.4.4",
"json-loader": "^0.5.4",
"keyboardjs": "^2.3.4",
@@ -42,16 +41,11 @@
"regexp-replace-loader": "0.0.1",
"sass-loader": "^4.1.1",
"stream-chunker": "^1.2.8",
"subworkers": "^1.0.1",
"to-arraybuffer": "^1.0.1",
"transform-loader": "^0.2.3",
"voice-activity-detection": "johni0702/voice-activity-detection#9f8bd90",
"webpack": "^1.13.3",
"webworkify-webpack": "^1.1.8",
"libsamplerate.js": "^1.0.0",
"mumble-client-codecs-browser": "^1.2.0",
"mumble-client-websocket": "^1.0.0",
"mumble-client": "^1.3.0",
"web-audio-buffer-queue": "^1.1.0"
"mumble-client": "github:johni0702/mumble-client#8124ee7"
}
}