How to record webcam and audio using webRTC and a server-based Peer connection

I would like to record the user's webcam and audio and save it to a file on the server. These files would then be able to be served up to other users.

I have no problems with playback, however I'm having problems getting the content to record.

My understanding is that the getUserMedia .record() function has not yet been written - only a proposal has been made for it so far.

I would like to create a peer connection on my server using the PeerConnectionAPI. I understand this is a bit hacky, but I'm thinking it should be possible to create a peer on the server and record what the client-peer sends.

If this is possible, I should then be able to save this data to flv or any other video format.

My preference is actually to record the webcam + audio client-side, to allow the client to re-record videos if they didn't like their first attempt before uploading. This would also allow for interruptions in network connections. I've seen some code which allows recording of individual 'images' from the webcam by sending the data to the canvas - that's cool, but I need the audio too.

Here's the client side code I have so far:

  <video autoplay></video>

<script language="javascript" type="text/javascript">
function onVideoFail(e) {
    console.log('webcam fail!', e);

function hasGetUserMedia() {
  // Note: Opera is unprefixed.
  return !!(navigator.getUserMedia || navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia);

if (hasGetUserMedia()) {
  // Good to go!
} else {
  alert('getUserMedia() is not supported in your browser');

window.URL = window.URL || window.webkitURL;
navigator.getUserMedia  = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                          navigator.mozGetUserMedia || navigator.msGetUserMedia;

var video = document.querySelector('video');
var streamRecorder;
var webcamstream;

if (navigator.getUserMedia) {
  navigator.getUserMedia({audio: true, video: true}, function(stream) {
    video.src = window.URL.createObjectURL(stream);
    webcamstream = stream;
//  streamrecorder = webcamstream.record();
  }, onVideoFail);
} else {
    alert ('failed');

function startRecording() {
    streamRecorder = webcamstream.record();
    setTimeout(stopRecording, 10000);
function stopRecording() {
function postVideoToServer(videoblob) {
/*  var x = new XMLHttpRequest();'POST', 'uploadMessage');
    var data = {}; = videoblob;
    data.metadata = 'test metadata';
    data.action = "upload_video";"", data, onUploadSuccess);
function onUploadSuccess() {
    alert ('video uploaded');


<div id="webcamcontrols">
    <a class="recordbutton" href="javascript:startRecording();">RECORD</a>



You should definitely have a look at Kurento. It provides a WebRTC server infrastructure that allows you to record from a WebRTC feed and much more. You can also find some examples for the application you are planning here. It is really easy to add recording capabilities to that demo, and store the media file in a URI (local disk or wherever).

The project is licensed under Apache 2.0 (it was previously LGPL).


Since this post, we've added a new tutorial that shows how to add the recorder in a couple of scenarios

Disclaimer: I'm part of the team that develops Kurento.


Please, check the RecordRTC

RecordRTC is MIT licensed on github.


I believe using Kurento or other MCUs just for recording videos would be a bit of overkill, especially considering the fact that Chrome has had MediaRecorder API support since v47 and Firefox since v25. So at this juncture, you might not even need an external JS library to do the job; try this demo I made to record video/audio using MediaRecorder:

Demo — works in Chrome and Firefox (I intentionally left out the code that pushes the blob to the server)

Github Code Source

If you are running Firefox, you could test it right here (Chrome needs HTTPS):

'use strict'

let log = console.log.bind(console),
  id = val => document.getElementById(val),
  ul = id('ul'),
  gUMbtn = id('gUMbtn'),
  start = id('start'),
  stop = id('stop'),
  counter = 1,

gUMbtn.onclick = e => {
  let mv = id('mediaVideo'),
    mediaOptions = {
      video: {
        tag: 'video',
        type: 'video/webm',
        ext: '.mp4',
        gUM: {
          video: true,
          audio: true
      audio: {
        tag: 'audio',
        type: 'audio/ogg',
        ext: '.ogg',
        gUM: {
          audio: true
  media = mv.checked ? :;
  navigator.mediaDevices.getUserMedia(media.gUM).then(_stream => {
    stream = _stream;
    id('gUMArea').style.display = 'none';
    id('btns').style.display = 'inherit';
    recorder = new MediaRecorder(stream);
    recorder.ondataavailable = e => {
      if (recorder.state == 'inactive') makeLink();
    log('got media successfully');

start.onclick = e => {
  start.disabled = true;
  chunks = [];

stop.onclick = e => {
  stop.disabled = true;

function makeLink() {
  let blob = new Blob(chunks, {
      type: media.type
    url = URL.createObjectURL(blob),
    li = document.createElement('li'),
    mt = document.createElement(media.tag),
    hf = document.createElement('a');
  mt.controls = true;
  mt.src = url;
  hf.href = url; = `${counter++}${media.ext}`;
  hf.innerHTML = `download ${}`;
      button {
        margin: 10px 5px;
      li {
        margin: 10px;
      body {
        width: 90%;
        max-width: 960px;
        margin: 0px auto;
      #btns {
        display: none;
      h1 {
        margin-bottom: 100px;
<link type="text/css" rel="stylesheet" href="">
<h1> MediaRecorder API example</h1>

<p>For now it is supported only in Firefox(v25+) and Chrome(v47+)</p>
<div id='gUMArea'>
    <input type="radio" name="media" value="video" checked id='mediaVideo'>Video
    <input type="radio" name="media" value="audio">audio
  <button class="btn btn-default" id='gUMbtn'>Request Stream</button>
<div id='btns'>
  <button class="btn btn-default" id='start'>Start</button>
  <button class="btn btn-default" id='stop'>Stop</button>
  <ul class="list-unstyled" id='ul'></ul>
<script src=""></script>
<script src=""></script>


yes, as you understood, MediaStreamRecorder is currently unimplemented.

MediaStreamRecorder is a WebRTC API for recording getUserMedia() streams. It allows web apps to create a file from a live audio/video session.

Alternatively, you may do it like this, but the audio part is missing.


Web Call Server 4 can record WebRTC audio and video to a WebM container. The recording is done using the Vorbis codec for audio and the VP8 codec for video. The initial WebRTC codecs are Opus or G.711 and VP8. So, server-side recording requires either Opus/G.711-to-Vorbis server-side transcoding, or VP8-to-H.264 transcoding if it is necessary to use another container, i.e. AVI.


Check out Janus. Here is a recording demo:

Unlike Kurento, whose development has slowed down severely after the Twilio acquisition, Janus continues to be actively developed and supported.


For the record, I also don't have enough knowledge about this,

But I found this on GitHub:

<!DOCTYPE html>
  <title>XSockets.WebRTC Client example</title>
  <meta charset="utf-8" />

body {

.localvideo {
position: absolute;
right: 10px;
top: 10px;

.localvideo video {
max-width: 240px;
border: 2px solid #333;

 .remotevideos {

.remotevideos video{
<h1>XSockets.WebRTC Client example </h1>
<div class="localvideo">
    <video autoplay></video>

<h2>Remote videos</h2>
<div class="remotevideos">

<h2>Recordings  ( Click on your camera stream to start record)</h2>

<div id="immediate"></div>
<script src="XSockets.latest.js"></script>
<script src="adapter.js"></script>
<script src="bobBinder.js"></script>
<script src="xsocketWebRTC.js"></script>
    var $ = function (selector, el) {
        if (!el) el = document;
        return el.querySelector(selector);
    var trace = function (what, obj) {
        var pre = document.createElement("pre");
        pre.textContent = JSON.stringify(what) + " - " + JSON.stringify(obj || "");
    var main = (function () {
        var broker;
        var rtc;
        trace("Try connect the connectionBroker");
        var ws = new XSockets.WebSocket("wss://", ["connectionbroker"], {
            ctx: '23fbc61c-541a-4c0d-b46e-1a1f6473720a'
        var onError = function (err) {
            trace("error", arguments);
        var recordMediaStream = function (stream) {
            if ("MediaRecorder" in window === false) {
                trace("Recorder not started MediaRecorder not available in this browser. ");
            var recorder = new XSockets.MediaRecorder(stream);
            trace("Recorder started.. ");
            recorder.oncompleted = function (blob, blobUrl) {
                trace("Recorder completed.. ");
                var li = document.createElement("li");
                var download = document.createElement("a");
                download.textContent = new Date();
                download.setAttribute("download", XSockets.Utils.randomString(8) + ".webm");
                download.setAttribute("href", blobUrl);
        var addRemoteVideo = function (peerId, mediaStream) {
            var remoteVideo = document.createElement("video");
            remoteVideo.setAttribute("autoplay", "autoplay");
            remoteVideo.setAttribute("rel", peerId);
            attachMediaStream(remoteVideo, mediaStream);
        var onConnectionLost = function (remotePeer) {
            trace("onconnectionlost", arguments);
            var peerId = remotePeer.PeerId;
            var videoToRemove = $("video[rel='" + peerId + "']");
        var oncConnectionCreated = function () {
            console.log(arguments, rtc);
            trace("oncconnectioncreated", arguments);
        var onGetUerMedia = function (stream) {
            trace("Successfully got some userMedia , hopefully a goat will appear..");
            rtc.connectToContext(); // connect to the current context?
        var onRemoteStream = function (remotePeer) {
            trace("Opps, we got a remote stream. lets see if its a goat..");
        var onLocalStream = function (mediaStream) {
            trace("Got a localStream",;
            attachMediaStream($(".localvideo video "), mediaStream);
            // if user click, video , call the recorder
            $(".localvideo video ").addEventListener("click", function () {
        var onContextCreated = function (ctx) {
            trace("RTC object created, and a context is created - ", ctx);
            rtc.getUserMedia(rtc.userMediaConstraints.hd(false), onGetUerMedia, onError);
        var onOpen = function () {
            trace("Connected to the brokerController - 'connectionBroker'");
            rtc = new XSockets.WebRTC(this);
            rtc.onlocalstream = onLocalStream;
            rtc.oncontextcreated = onContextCreated;
            rtc.onconnectioncreated = oncConnectionCreated;
            rtc.onconnectionlost = onConnectionLost;
            rtc.onremotestream = onRemoteStream;
            rtc.onanswer = function (event) {
            rtc.onoffer = function (event) {
        var onConnected = function () {
            trace("connection to the 'broker' server is established");
            trace("Try get the broker controller form server..");
            broker = ws.controller("connectionbroker");
            broker.onopen = onOpen;
        ws.onconnected = onConnected;
    document.addEventListener("DOMContentLoaded", main);

On line number 89 in my case, the `oncompleted` handler actually appends a link to the recorded file; if you click on that link, it will start the download. You can save that path to your server as a file.

The Recording code looks something like this

recorder.oncompleted = function (blob, blobUrl) {
                trace("Recorder completed.. ");
                var li = document.createElement("li");
                var download = document.createElement("a");
                download.textContent = new Date();
                download.setAttribute("download", XSockets.Utils.randomString(8) + ".webm");
                download.setAttribute("href", blobUrl);

The blobUrl holds the path. I solved my problem with this; I hope someone will find it useful.


Technically, you can use FFmpeg on the backend to mix the video and audio.


You can use RecordRTC-together, which is based on RecordRTC.

It supports recording video and audio together in separate files. You will need a tool like FFmpeg to merge the two files into one on the server.


Recent Questions

Top Questions

Home Tags Terms of Service Privacy Policy DMCA Contact Us

©2020 All rights reserved.