Commit 056576c7 authored by Moritz Langenstein

(ml5717) Removed server.js and simplified socket.js to allow usage in express

parent 9fe0cd64
web: node server.js
# SignalBuddy
A scalable signaling server for WebRTC using socket.io, NodeJS cluster, and Redis. Made for use with [liowebrtc](https://github.com/lazorfuzz/liowebrtc) and [react-liowebrtc](https://github.com/lazorfuzz/react-liowebrtc).
## What is SignalBuddy?
SignalBuddy is an easy-to-scale signaling solution for WebRTC. It automatically detects the number of CPU cores in its environment and scales across them: if the machine you're testing on has four cores, SignalBuddy launches a cluster of four processes, each a separate instance of itself, all listening on the same port. Because state is stored in Redis, peers connected to different worker instances, even on different servers, can still join the same rooms and broadcast data to one another.
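The cross-worker (and cross-server) room state is handled by socket.io's Redis adapter. Stripped down to the essentials, the wiring in `sockets.js` amounts to roughly this sketch (it assumes the default `localhost:6379` Redis endpoint from the bundled config):
```
const http = require('http');
const socketIO = require('socket.io');
const redisAdapter = require('socket.io-redis');

// Each worker attaches socket.io to its own http(s) server...
const server = http.createServer();
const io = socketIO.listen(server);
// ...and every process (or machine) pointing at the same Redis instance
// shares rooms and broadcasts through this adapter.
io.adapter(redisAdapter({ host: 'localhost', port: 6379 }));
server.listen(8888);
```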
## What is a signaling server?
WebRTC needs to be facilitated with signaling: a service that acts as a matchmaker for peers before they establish direct video/audio/data channels. Signaling can be done in any way, e.g. via good old-fashioned carrier pigeons. A signaling service only needs to fulfill the minimal role of matchmaking peers; SignalBuddy, however, comes with some extra perks, such as the ability to segment peers into rooms and built-in STUN/TURN server dispensing.
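On the wire, SignalBuddy speaks socket.io and pushes ICE server details to each peer as soon as it connects (see the `stunservers` and `turnservers` events in `sockets.js`). A minimal client-side sketch, assuming plain `socket.io-client` and the default dev port:
```
const io = require('socket.io-client');

// Connect to a local SignalBuddy instance (dev default: port 8888).
const client = io('http://localhost:8888', { transports: ['websocket'] });

client.on('connect', () => console.log('connected to SignalBuddy'));

// SignalBuddy emits ICE server details to every client on connection.
client.on('stunservers', (servers) => console.log('STUN:', servers));
client.on('turnservers', (servers) => console.log('TURN:', servers));
```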
## Startup Guide
First, install and start Redis. Once Redis is listening on the default port (6379), `cd` into the signalbuddy project and install the dependencies:
```
npm install
```
Build the project:
```
npm run build
```
Start in dev mode:
```
npm start
```
If all goes well, your console should display a message like this:
```
Listening at http://localhost:8888/
```
If you open the URL in a web browser, you should see output showing which worker process you're currently connected to. For example:
```
worker: 4
```
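The `worker: n` body comes from the catch-all route; `server.js` also exposes a `/healthcheck` route that answers with a plain HTTP 200. A minimal probe, assuming the default dev port:
```
const http = require('http');

// Hits the /healthcheck route defined in server.js.
http.get('http://localhost:8888/healthcheck', (res) => {
  console.log('healthcheck status:', res.statusCode); // 200 when a worker is up
}).on('error', (err) => console.error('server unreachable:', err.message));
```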
## Production
To run in production mode:
```
NODE_ENV=production node dist/server.js
```
Most likely, you'll want the server secured with SSL. You can either pass the paths to your key and certificate through the PRIV_KEY and CERT environment variables:
```
NODE_ENV=production PRIV_KEY=/etc/example/privKey.pem CERT=/etc/example/cert.pem node dist/server.js
```
Or you can add your key/cert paths to the production.json file in the config folder.
To pass in the Redis endpoint and port, use REDIS_ENDPOINT and REDIS_PORT:
```
NODE_ENV=production REDIS_ENDPOINT=localhost REDIS_PORT=6379 node dist/server.js
```
As with keys and certs, you can also add Redis endpoint details to your JSON config files.
---
This project is still in development. In the near future I will add a Dockerfile, as well as additional deployment methods and helpers that will make it easier to deploy on AWS, Digital Ocean, etc.
{
"isDev": true,
"server": {
"port": 8888,
"/* secure */": "/* whether this connects via https */",
"secure": false,
"key": null,
"cert": null,
"password": null
},
"redis": {
"endpoint": "localhost",
"port": 6379
},
"rooms": {
"/* maxClients */": "/* maximum number of clients per room. 0 = no limit */",
"maxClients": 0
},
"stunservers": [
{
"urls": "stun:stun.l.google.com:19302"
}
],
"turnservers": [
{
"urls": ["turn:your.turn.servers.here"],
"secret": "turnserversharedsecret",
"expiry": 86400
}
]
}
{
"isDev": true,
"server": {
"port": 443,
"secure": true,
"key": null,
"cert": null
},
"redis": {
"endpoint": "localhost",
"port": 6379
},
"rooms": {
"/* maxClients */": "/* maximum number of clients per room. 0 = no limit */",
"maxClients": 0
},
"stunservers": [
{
"urls": "stun:stun.l.google.com:19302"
}
],
"turnservers": []
}
'use strict';
var _getconfig = require('getconfig');
var _getconfig2 = _interopRequireDefault(_getconfig);
var _fs = require('fs');
var _fs2 = _interopRequireDefault(_fs);
var _os = require('os');
var _os2 = _interopRequireDefault(_os);
var _stickySession = require('sticky-session');
var _stickySession2 = _interopRequireDefault(_stickySession);
var _farmhash = require('farmhash');
var _farmhash2 = _interopRequireDefault(_farmhash);
var _net = require('net');
var _net2 = _interopRequireDefault(_net);
var _cluster = require('cluster');
var _cluster2 = _interopRequireDefault(_cluster);
var _http = require('http');
var _http2 = _interopRequireDefault(_http);
var _https = require('https');
var _https2 = _interopRequireDefault(_https);
var _sockets = require('./sockets');
var _sockets2 = _interopRequireDefault(_sockets);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var port = parseInt(process.env.PORT || _getconfig2.default.server.port, 10);
var redisEndpoint = process.env.REDIS_ENDPOINT || _getconfig2.default.redis.endpoint;
var redisPort = process.env.REDIS_PORT || _getconfig2.default.redis.port;
var numProcesses = _os2.default.cpus().length;
if (_cluster2.default.isMaster) {
var workers = [];
var spawn = function spawn(i) {
workers[i] = _cluster2.default.fork();
// Persistence
workers[i].on('exit', function (code, signal) {
console.log('Worker ' + i + ' exited with signal ' + signal);
console.log('Respawning worker', i);
spawn(i);
});
};
for (var i = 0; i < numProcesses; i += 1) {
console.log('Starting worker ' + (i + 1));
spawn(i);
}
var workerIndex = function workerIndex(ip, len) {
return (// Farmhash is the fastest and works with IPv6, too
_farmhash2.default.fingerprint32(ip) % len
);
};
// Create the outside facing server listening on our port.
var masterServer = _net2.default.createServer({ pauseOnConnect: true }, function (connection) {
// We received a connection and need to pass it to the appropriate
// worker. Get the worker for this connection's source IP and pass
// it the connection.
var worker = workers[workerIndex(connection.remoteAddress, numProcesses)];
worker.send('sticky-session:connection', connection);
}).listen(port);
console.log('Listening at ' + (_getconfig2.default.server.secure ? 'https' : 'http') + '://' + (process.env.NODE_ENV === 'production' ? '0.0.0.0' : 'localhost') + ':' + port + '/');
} else {
var serverHandler = function serverHandler(req, res) {
if (req.url === '/healthcheck') {
console.log(Date.now(), 'healthcheck');
res.writeHead(200);
res.end();
return;
}
res.writeHead(404);
res.end('worker: ' + _cluster2.default.worker.id);
};
var server = null;
// Create an http(s) server instance so that socket.io can listen on it
if (_getconfig2.default.server.secure) {
server = _https2.default.Server({
key: _fs2.default.readFileSync(process.env.PRIV_KEY || _getconfig2.default.server.key),
cert: _fs2.default.readFileSync(process.env.CERT || _getconfig2.default.server.cert),
passphrase: _getconfig2.default.server.password
}, serverHandler);
} else {
server = _http2.default.Server(serverHandler);
}
if (!_stickySession2.default.listen(server, port)) {
// Master
} else {
// Worker
}
server.listen(0);
(0, _sockets2.default)(server, Object.assign({ redisEndpoint: redisEndpoint, redisPort: redisPort }, _getconfig2.default));
if (_getconfig2.default.uid) process.setuid(_getconfig2.default.uid);
process.on('message', function (message, connection) {
if (message !== 'sticky-session:connection') {
return;
}
// Emulate a connection event on the server by emitting the
// event with the connection the master sent us.
server.emit('connection', connection);
connection.resume();
});
}
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _arguments = arguments;
var _socket = require('socket.io');
var _socket2 = _interopRequireDefault(_socket);
@@ -13,25 +8,22 @@ var _v = require('uuid/v4');
var _v2 = _interopRequireDefault(_v);
var _crypto = require('crypto');
var _crypto2 = _interopRequireDefault(_crypto);
var _socket3 = require('socket.io-redis');
var _freeice = require('freeice');
var _socket4 = _interopRequireDefault(_socket3);
var _freeice2 = _interopRequireDefault(_freeice);
var _util = require('./util');
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
exports.default = function (server, config) {
function signalbuddy(server, config) {
var _arguments = arguments;
var io = _socket2.default.listen(server);
io.adapter((0, _socket4.default)({ host: config.redisEndpoint || config.redis.endpoint, port: config.redisPort || config.redis.port }));
io.on('connection', function (client) {
client.resources = {
screen: false,
video: true,
video: false,
audio: false
};
@@ -133,35 +125,9 @@ exports.default = function (server, config) {
});
});
/*
client.on('trace', (data) => {
// console.log('trace', JSON.stringify([data.type, data.session, data.prefix, data.peer, data.time, data.value]));
});
*/
// tell client about stun and turn servers and generate nonces
client.emit('stunservers', config.stunservers || []);
// create shared secret nonces for TURN authentication
// the process is described in draft-uberti-behave-turn-rest
var credentials = [];
// allow selectively vending turn credentials based on origin.
var origin = client.handshake.headers.origin;
if (!config.turnorigins || config.turnorigins.includes(origin)) {
config.turnservers.forEach(function (server) {
var hmac = _crypto2.default.createHmac('sha1', server.secret);
// default to 86400 seconds timeout unless specified
var username = '' + (Math.floor(new Date().getTime() / 1000) + parseInt(server.expiry || 86400, 10));
hmac.update(username);
credentials.push({
username: username,
credential: hmac.digest('base64'),
urls: server.urls || server.url
});
});
}
client.emit('turnservers', credentials);
client.emit('stunservers', (0, _freeice2.default)({ stunCount: config.stunCount, turnCount: 0 }));
client.emit('turnservers', (0, _freeice2.default)({ stunCount: 0, turnCount: config.turnCount }));
});
function describeRoom(roomName) {
@@ -187,4 +153,6 @@ exports.default = function (server, config) {
});
});
}
};
};
module.exports = signalbuddy;
@@ -3,14 +3,11 @@
"description": "A scalable socket.io signaling solution for WebRTC using NodeJS cluster and Redis.",
"version": "1.0.0",
"dependencies": {
"farmhash": "^2.1.0",
"getconfig": "^4.3.0",
"uuid": "^3.3.3",
"freeice": "^2.2.2",
"socket.io": "^2.3.0",
"socket.io-redis": "5.2.0",
"sticky-session": "^1.1.2"
"uuid": "^3.3.3"
},
"main": "dist/server.js",
"main": "dist/sockets.js",
"repository": "https://github.com/lazorfuzz/signalbuddy",
"devDependencies": {
"babel-cli": "^6.26.0",
@@ -21,14 +18,10 @@
"eslint-plugin-import": "^2.7.0",
"eslint-plugin-jsx-a11y": "^6.0.2",
"eslint-plugin-react": "^7.4.0",
"precommit-hook": "0.3.10",
"socket.io-client": "^1.7.4",
"tape": "^2.13.1"
"precommit-hook": "0.3.10"
},
"license": "MIT",
"scripts": {
"test": "node test/test.js",
"build": "rm -rf dist && babel src --presets babel-preset-env --out-dir dist",
"start": "node dist/server.js"
"build": "rm -rf dist && babel src --presets babel-preset-env --out-dir dist"
}
}
#!/bin/bash
if [ ! -e server.js ]
then
echo "Error: could not find main application server.js file"
echo "You should run the generate-ssl-certs.sh script from the main application root directory"
echo "i.e: bash scripts/generate-ssl-certs.sh"
exit -1
fi
echo "Generating self-signed certificates..."
mkdir -p ./config/sslcerts
openssl genrsa -out ./config/sslcerts/key.pem 1024
openssl req -new -key ./config/sslcerts/key.pem -out ./config/sslcerts/csr.pem
openssl x509 -req -days 9999 -in ./config/sslcerts/csr.pem -signkey ./config/sslcerts/key.pem -out ./config/sslcerts/cert.pem
rm ./config/sslcerts/csr.pem
chmod 600 ./config/sslcerts/key.pem ./config/sslcerts/cert.pem
import config from 'getconfig';
import fs from 'fs';
import os from 'os';
import sticky from 'sticky-session';
import farmhash from 'farmhash';
import net from 'net';
import cluster from 'cluster';
import http from 'http';
import https from 'https';
import sockets from './sockets';
const port = parseInt(process.env.PORT || config.server.port, 10);
const redisEndpoint = process.env.REDIS_ENDPOINT || config.redis.endpoint;
const redisPort = process.env.REDIS_PORT || config.redis.port;
const numProcesses = os.cpus().length;
if (cluster.isMaster) {
const workers = [];
const spawn = (i) => {
workers[i] = cluster.fork();
// Persistence
workers[i].on('exit', (code, signal) => {
console.log(`Worker ${i} exited with signal ${signal}`);
console.log('Respawning worker', i);
spawn(i);
});
};
for (let i = 0; i < numProcesses; i += 1) {
console.log(`Starting worker ${i + 1}`);
spawn(i);
}
const workerIndex = (ip, len) => // Farmhash is the fastest and works with IPv6, too
farmhash.fingerprint32(ip) % len;
// Create the outside facing server listening on our port.
const masterServer = net.createServer({ pauseOnConnect: true }, (connection) => {
// We received a connection and need to pass it to the appropriate
// worker. Get the worker for this connection's source IP and pass
// it the connection.
const worker = workers[workerIndex(connection.remoteAddress, numProcesses)];
worker.send('sticky-session:connection', connection);
}).listen(port);
console.log(`Listening at ${config.server.secure ? 'https' : 'http'}://${process.env.NODE_ENV === 'production' ? '0.0.0.0' : 'localhost'}:${port}/`);
} else {
const serverHandler = (req, res) => {
if (req.url === '/healthcheck') {
console.log(Date.now(), 'healthcheck');
res.writeHead(200);
res.end();
return;
}
res.writeHead(404);
res.end(`worker: ${cluster.worker.id}`);
};
let server = null;
// Create an http(s) server instance so that socket.io can listen on it
if (config.server.secure) {
server = https.Server({
key: fs.readFileSync(process.env.PRIV_KEY || config.server.key),
cert: fs.readFileSync(process.env.CERT || config.server.cert),
passphrase: config.server.password
}, serverHandler);
} else {
server = http.Server(serverHandler);
}
if (!sticky.listen(server, port)) {
// Master
} else {
// Worker
}
server.listen(0);
sockets(server, Object.assign({ redisEndpoint, redisPort }, config));
if (config.uid) process.setuid(config.uid);
process.on('message', (message, connection) => {
if (message !== 'sticky-session:connection') {
return;
}
// Emulate a connection event on the server by emitting the
// event with the connection the master sent us.
server.emit('connection', connection);
connection.resume();
});
}
import socketIO from 'socket.io';
import uuidv4 from 'uuid/v4';
import crypto from 'crypto';
import redisAdapter from 'socket.io-redis';
import freeice from 'freeice';
import { safeCb } from './util';
export default (server, config) => {
function signalbuddy(server, config) {
const io = socketIO.listen(server);
// Prefer the Redis endpoint/port merged in by server.js (REDIS_ENDPOINT / REDIS_PORT), falling back to the JSON config.
io.adapter(redisAdapter({ host: config.redisEndpoint || config.redis.endpoint, port: config.redisPort || config.redis.port }));
io.on('connection', (client) => {
client.resources = {
screen: false,
video: true,
video: false,
audio: false
};
@@ -112,34 +110,9 @@ export default (server, config) => {
});
});
/*
client.on('trace', (data) => {
// console.log('trace', JSON.stringify([data.type, data.session, data.prefix, data.peer, data.time, data.value]));
});
*/
// tell client about stun and turn servers and generate nonces
client.emit('stunservers', config.stunservers || []);
// create shared secret nonces for TURN authentication
// the process is described in draft-uberti-behave-turn-rest
const credentials = [];
// allow selectively vending turn credentials based on origin.
const { origin } = client.handshake.headers;
if (!config.turnorigins || config.turnorigins.includes(origin)) {
config.turnservers.forEach((server) => {
const hmac = crypto.createHmac('sha1', server.secret);
// default to 86400 seconds timeout unless specified
const username = `${Math.floor(new Date().getTime() / 1000) + (parseInt(server.expiry || 86400, 10))}`;
hmac.update(username);
credentials.push({
username,
credential: hmac.digest('base64'),
urls: server.urls || server.url
});
});
}
client.emit('turnservers', credentials);
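// NOTE: the freeice calls below replace the manual TURN credential generation above.
// stunCount and turnCount are expected on the config passed in; they are not part of the JSON configs shown earlier.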
client.emit('stunservers', freeice({ stunCount: config.stunCount, turnCount: 0 }));
client.emit('turnservers', freeice({ stunCount: 0, turnCount: config.turnCount }));
});
function describeRoom(roomName) {
@@ -166,3 +139,5 @@ export default (server, config) => {
});
}
};
module.exports = signalbuddy;
import tape from 'tape';
import config from 'getconfig';
import io from 'socket.io-client';
// import server from '../src/server';
const test = tape.createHarness();
const output = test.createStream();
output.pipe(process.stdout);
output.on('end', () => {
console.log('Tests complete, killing server.');
process.exit(0);
});
let socketURL;
if (config.server.secure) {
socketURL = `https://localhost:${config.server.port}`;
} else {
socketURL = `http://localhost:${config.server.port}`;
}
const socketOptions = {
transports: ['websocket'],
'force new connection': true,
secure: config.server.secure
};
test('it should not crash when sent an empty message', (t) => {
t.plan(1);
const client = io.connect(socketURL, socketOptions);
client.on('connect', () => {
client.emit('message');
t.ok(true);
});
});