Best JavaScript code snippet using wpt
RTCPeerConnection-helper.js
Source: RTCPeerConnection-helper.js
...216// It does the heavy lifting of performing signaling handshake,217// ICE candidate exchange, and waiting for data channel at two218// end points to open. Can do both negotiated and non-negotiated setup.219async function createDataChannelPair(t, options,220 pc1 = createPeerConnectionWithCleanup(t),221 pc2 = createPeerConnectionWithCleanup(t)) {222 let pair = [], bothOpen;223 try {224 if (options.negotiated) {225 pair = [pc1, pc2].map(pc => pc.createDataChannel('', options));226 bothOpen = Promise.all(pair.map(dc => new Promise((r, e) => {227 dc.onopen = r;228 dc.onerror = ({error}) => e(error);229 })));230 } else {231 pair = [pc1.createDataChannel('', options)];232 bothOpen = Promise.all([233 new Promise((r, e) => {234 pair[0].onopen = r;235 pair[0].onerror = ({error}) => e(error);236 }),237 new Promise((r, e) => pc2.ondatachannel = ({channel}) => {238 pair[1] = channel;239 channel.onopen = r;240 channel.onerror = ({error}) => e(error);241 })242 ]);243 }244 exchangeIceCandidates(pc1, pc2);245 await exchangeOfferAnswer(pc1, pc2);246 await bothOpen;247 return pair;248 } finally {249 for (const dc of pair) {250 dc.onopen = dc.onerror = null;251 }252 }253}254// Wait for RTP and RTCP stats to arrive255async function waitForRtpAndRtcpStats(pc) {256 // If remote stats are never reported, return after 5 seconds.257 const startTime = performance.now();258 while (true) {259 const report = await pc.getStats();260 const stats = [...report.values()].filter(({type}) => type.endsWith("bound-rtp"));261 // Each RTP and RTCP stat has a reference262 // to the matching stat in the other direction263 if (stats.length && stats.every(({localId, remoteId}) => localId || remoteId)) {264 break;265 }266 if (performance.now() > startTime + 5000) {267 break;268 }269 }270}271// Wait for a single message event and return272// a promise that resolve when the event fires273function awaitMessage(channel) {274 const once = true;275 return new Promise((resolve, reject) => {276 
channel.addEventListener('message', ({data}) => resolve(data), {once});277 channel.addEventListener('error', reject, {once});278 });279}280// Helper to convert a blob to array buffer so that281// we can read the content282async function blobToArrayBuffer(blob) {283 const reader = new FileReader();284 reader.readAsArrayBuffer(blob);285 return new Promise((resolve, reject) => {286 reader.addEventListener('load', () => resolve(reader.result), {once: true});287 reader.addEventListener('error', () => reject(reader.error), {once: true});288 });289}290// Assert that two TypedArray or ArrayBuffer objects have the same byte values291function assert_equals_typed_array(array1, array2) {292 const [view1, view2] = [array1, array2].map((array) => {293 if (array instanceof ArrayBuffer) {294 return new DataView(array);295 } else {296 assert_true(array.buffer instanceof ArrayBuffer,297 'Expect buffer to be instance of ArrayBuffer');298 return new DataView(array.buffer, array.byteOffset, array.byteLength);299 }300 });301 assert_equals(view1.byteLength, view2.byteLength,302 'Expect both arrays to be of the same byte length');303 const byteLength = view1.byteLength;304 for (let i = 0; i < byteLength; ++i) {305 assert_equals(view1.getUint8(i), view2.getUint8(i),306 `Expect byte at buffer position ${i} to be equal`);307 }308}309// These media tracks will be continually updated with deterministic "noise" in310// order to ensure UAs do not cease transmission in response to apparent311// silence.312//313// > Many codecs and systems are capable of detecting "silence" and changing314// > their behavior in this case by doing things such as not transmitting any315// > media.316//317// Source: https://w3c.github.io/webrtc-pc/#offer-answer-options318const trackFactories = {319 // Share a single context between tests to avoid exceeding resource limits320 // without requiring explicit destruction.321 audioContext: null,322 /**323 * Given a set of requested media types, determine if the user agent 
is324 * capable of procedurally generating a suitable media stream.325 *326 * @param {object} requested327 * @param {boolean} [requested.audio] - flag indicating whether the desired328 * stream should include an audio track329 * @param {boolean} [requested.video] - flag indicating whether the desired330 * stream should include a video track331 *332 * @returns {boolean}333 */334 canCreate(requested) {335 const supported = {336 audio: !!window.AudioContext && !!window.MediaStreamAudioDestinationNode,337 video: !!HTMLCanvasElement.prototype.captureStream338 };339 return (!requested.audio || supported.audio) &&340 (!requested.video || supported.video);341 },342 audio() {343 const ctx = trackFactories.audioContext = trackFactories.audioContext ||344 new AudioContext();345 const oscillator = ctx.createOscillator();346 const dst = oscillator.connect(ctx.createMediaStreamDestination());347 oscillator.start();348 return dst.stream.getAudioTracks()[0];349 },350 video({width = 640, height = 480, signal} = {}) {351 const canvas = Object.assign(352 document.createElement("canvas"), {width, height}353 );354 const ctx = canvas.getContext('2d');355 const stream = canvas.captureStream();356 let count = 0;357 const interval = setInterval(() => {358 ctx.fillStyle = `rgb(${count%255}, ${count*count%255}, ${count%255})`;359 count += 1;360 ctx.fillRect(0, 0, width, height);361 // Add some bouncing boxes in contrast color to add a little more noise.362 const contrast = count + 128;363 ctx.fillStyle = `rgb(${contrast%255}, ${contrast*contrast%255}, ${contrast%255})`;364 const xpos = count % (width - 20);365 const ypos = count % (height - 20);366 ctx.fillRect(xpos, ypos, xpos + 20, ypos + 20);367 const xpos2 = (count + width / 2) % (width - 20);368 const ypos2 = (count + height / 2) % (height - 20);369 ctx.fillRect(xpos2, ypos2, xpos2 + 20, ypos2 + 20);370 // If signal is set (0-255), add a constant-color box of that luminance to371 // the video frame at coordinates 20 to 60 in both X and 
Y direction.372 // (big enough to avoid color bleed from surrounding video in some codecs,373 // for more stable tests).374 if (signal != undefined) {375 ctx.fillStyle = `rgb(${signal}, ${signal}, ${signal})`;376 ctx.fillRect(20, 20, 40, 40);377 }378 }, 100);379 if (document.body) {380 document.body.appendChild(canvas);381 } else {382 document.addEventListener('DOMContentLoaded', () => {383 document.body.appendChild(canvas);384 }, {once: true});385 }386 // Implement track.stop() for performance in some tests on some platforms387 const track = stream.getVideoTracks()[0];388 const nativeStop = track.stop;389 track.stop = function stop() {390 clearInterval(interval);391 nativeStop.apply(this);392 if (document.body && canvas.parentElement == document.body) {393 document.body.removeChild(canvas);394 }395 };396 return track;397 }398};399// Get the signal from a video element inserted by createNoiseStream400function getVideoSignal(v) {401 if (v.videoWidth < 60 || v.videoHeight < 60) {402 throw new Error('getVideoSignal: video too small for test');403 }404 const canvas = document.createElement("canvas");405 canvas.width = canvas.height = 60;406 const context = canvas.getContext('2d');407 context.drawImage(v, 0, 0);408 // Extract pixel value at position 40, 40409 const pixel = context.getImageData(40, 40, 1, 1);410 // Use luma reconstruction to get back original value according to411 // ITU-R rec BT.709412 return (pixel.data[0] * 0.21 + pixel.data[1] * 0.72 + pixel.data[2] * 0.07);413}414async function detectSignal(t, v, value) {415 while (true) {416 const signal = getVideoSignal(v).toFixed();417 // allow off-by-two pixel error (observed in some implementations)418 if (value - 2 <= signal && signal <= value + 2) {419 return;420 }421 // We would like to wait for each new frame instead here,422 // but there seems to be no such callback.423 await new Promise(r => t.step_timeout(r, 100));424 }425}426// Generate a MediaStream bearing the specified tracks.427//428// @param 
{object} [caps]429// @param {boolean} [caps.audio] - flag indicating whether the generated stream430// should include an audio track431// @param {boolean} [caps.video] - flag indicating whether the generated stream432// should include a video track, or parameters for video433async function getNoiseStream(caps = {}) {434 if (!trackFactories.canCreate(caps)) {435 return navigator.mediaDevices.getUserMedia(caps);436 }437 const tracks = [];438 if (caps.audio) {439 tracks.push(trackFactories.audio());440 }441 if (caps.video) {442 tracks.push(trackFactories.video(caps.video));443 }444 return new MediaStream(tracks);445}446// Obtain a MediaStreamTrack of kind using procedurally-generated streams (and447// falling back to `getUserMedia` when the user agent cannot generate the448// requested streams).449// Return Promise of pair of track and associated mediaStream.450// Assumes that there is at least one available device451// to generate the track.452function getTrackFromUserMedia(kind) {453 return getNoiseStream({ [kind]: true })454 .then(mediaStream => {455 const [track] = mediaStream.getTracks();456 return [track, mediaStream];457 });458}459// Obtain |count| MediaStreamTracks of type |kind| and MediaStreams. The tracks460// do not belong to any stream and the streams are empty. 
Returns a Promise461// resolved with a pair of arrays [tracks, streams].462// Assumes there is at least one available device to generate the tracks and463// streams and that the getUserMedia() calls resolve.464function getUserMediaTracksAndStreams(count, type = 'audio') {465 let otherTracksPromise;466 if (count > 1)467 otherTracksPromise = getUserMediaTracksAndStreams(count - 1, type);468 else469 otherTracksPromise = Promise.resolve([[], []]);470 return otherTracksPromise.then(([tracks, streams]) => {471 return getTrackFromUserMedia(type)472 .then(([track, stream]) => {473 // Remove the default stream-track relationship.474 stream.removeTrack(track);475 tracks.push(track);476 streams.push(stream);477 return [tracks, streams];478 });479 });480}481// Performs an offer exchange caller -> callee.482async function exchangeOffer(caller, callee) {483 await caller.setLocalDescription(await caller.createOffer());484 await callee.setRemoteDescription(caller.localDescription);485}486// Performs an answer exchange caller -> callee.487async function exchangeAnswer(caller, callee) {488 // Note that caller's remote description must be set first; if not,489 // there's a chance that candidates from callee arrive at caller before490 // it has a remote description to apply them to.491 const answer = await callee.createAnswer();492 await caller.setRemoteDescription(answer);493 await callee.setLocalDescription(answer);494}495async function exchangeOfferAnswer(caller, callee) {496 await exchangeOffer(caller, callee);497 await exchangeAnswer(caller, callee);498}499// The returned promise is resolved with caller's ontrack event.500async function exchangeAnswerAndListenToOntrack(t, caller, callee) {501 const ontrackPromise = addEventListenerPromise(t, caller, 'track');502 await exchangeAnswer(caller, callee);503 return ontrackPromise;504}505// The returned promise is resolved with callee's ontrack event.506async function exchangeOfferAndListenToOntrack(t, caller, callee) {507 const 
ontrackPromise = addEventListenerPromise(t, callee, 'track');508 await exchangeOffer(caller, callee);509 return ontrackPromise;510}511// The resolver extends a |promise| that can be resolved or rejected using |resolve|512// or |reject|.513class Resolver extends Promise {514 constructor(executor) {515 let resolve, reject;516 super((resolve_, reject_) => {517 resolve = resolve_;518 reject = reject_;519 if (executor) {520 return executor(resolve_, reject_);521 }522 });523 this._done = false;524 this._resolve = resolve;525 this._reject = reject;526 }527 /**528 * Return whether the promise is done (resolved or rejected).529 */530 get done() {531 return this._done;532 }533 /**534 * Resolve the promise.535 */536 resolve(...args) {537 this._done = true;538 return this._resolve(...args);539 }540 /**541 * Reject the promise.542 */543 reject(...args) {544 this._done = true;545 return this._reject(...args);546 }547}548function addEventListenerPromise(t, obj, type, listener) {549 if (!listener) {550 return waitUntilEvent(obj, type);551 }552 return new Promise(r => obj.addEventListener(type,553 t.step_func(e => r(listener(e))),554 {once: true}));555}556function createPeerConnectionWithCleanup(t) {557 const pc = new RTCPeerConnection();558 t.add_cleanup(() => pc.close());559 return pc;560}561async function createTrackAndStreamWithCleanup(t, kind = 'audio') {562 let constraints = {};563 constraints[kind] = true;564 const stream = await getNoiseStream(constraints);565 const [track] = stream.getTracks();566 t.add_cleanup(() => track.stop());567 return [track, stream];568}569function findTransceiverForSender(pc, sender) {570 const transceivers = pc.getTransceivers();...
Using AI Code Generation
// Drive one local offer/answer cycle using the *WithCleanup test helpers.
// (The helpers themselves are defined elsewhere on this page.)
const peerConnection = createPeerConnectionWithCleanup();
const dataChannel = createDataChannelWithCleanup(peerConnection);
const localOffer = createOfferWithCleanup(peerConnection);
const remoteAnswer = createAnswerWithCleanup(peerConnection);
setLocalDescriptionWithCleanup(peerConnection, localOffer);
setRemoteDescriptionWithCleanup(peerConnection, remoteAnswer);
Using AI Code Generation
// Connect two local RTCPeerConnections with a full offer/answer exchange.
//
// FIX(review): the original called pc.addIceCandidate(otherPc.localDescription),
// passing a session description where an RTCIceCandidate is required — that
// call rejects. ICE candidates must instead be forwarded from each peer's
// 'icecandidate' event, and the handlers must be installed before
// setLocalDescription() starts candidate gathering.
var pc1 = createPeerConnectionWithCleanup();
var pc2 = createPeerConnectionWithCleanup();
// Trickle ICE: forward each gathered candidate to the other peer.
pc1.onicecandidate = function(e) {
  if (e.candidate) pc2.addIceCandidate(e.candidate);
};
pc2.onicecandidate = function(e) {
  if (e.candidate) pc1.addIceCandidate(e.candidate);
};
pc1.createOffer()
  .then(offer => pc1.setLocalDescription(offer))
  .then(() => pc2.setRemoteDescription(pc1.localDescription))
  .then(() => pc2.createAnswer())
  .then(answer => pc2.setLocalDescription(answer))
  .then(() => pc1.setRemoteDescription(pc2.localDescription));
Using AI Code Generation
1. createPeerConnectionWithCleanup(test, config)
2. createDataChannelWithCleanup(test, pc, label, options)
3. createOfferWithCleanup(test, pc, options)
4. createAnswerWithCleanup(test, pc, options)
5. setLocalDescriptionWithCleanup(test, pc, desc)
6. setRemoteDescriptionWithCleanup(test, pc, desc)
7. addIceCandidateWithCleanup(test, pc, candidate, expectedError)
8. createOfferAndSetLocalDescriptionWithCleanup(test, pc, options)
9. createOfferAndSetRemoteDescriptionWithCleanup(test, pc, options)
10. createAnswerAndSetLocalDescriptionWithCleanup(test, pc, options)
11. createAnswerAndSetRemoteDescriptionWithCleanup(test, pc, options)
Using AI Code Generation
// Exercise createOffer()/createAnswer() on a single peer connection.
//
// FIX(review): three defects in the original:
//  1. |pc| was assigned only inside the first .then() callback, so every
//     later callback referenced an undeclared |pc| (ReferenceError).
//  2. createPeerConnectionWithCleanup was declared twice; the second,
//     conflicting declaration shadowed the first.
//  3. Its promise resolved only once iceConnectionState reached 'connected',
//     a state that is never entered without a remote peer — so the promise
//     hung forever and none of the chain below ever ran.
var pc;
createPeerConnectionWithCleanup({iceServers: []}).then(function(createdPc) {
  pc = createdPc;
  return pc.createOffer({offerToReceiveVideo: true});
}).then(function(offer) {
  return pc.setLocalDescription(offer);
}).then(function() {
  return pc.createAnswer();
}).then(function(answer) {
  return pc.setRemoteDescription(answer);
}).then(function() {
  testPassed('createAnswer() resolves');
}).catch(function(error) {
  testFailed('createAnswer() failed with ' + error.name);
});

// Create an RTCPeerConnection and hand it back immediately; the chain above
// performs all negotiation itself, so there is nothing to wait for here.
function createPeerConnectionWithCleanup(options) {
  var newPc = new RTCPeerConnection(options);
  return Promise.resolve(newPc);
}
Using AI Code Generation
1var pc = createPeerConnectionWithCleanup(null);2var dc = pc.createDataChannel('datachannel');3dc.onopen = function() {4 pc.close();5};6dc.onclose = function() {7 test.done();8};9promise_test(function(t) {10 return navigator.mediaDevices.getUserMedia({audio: true, video: true})11 .then(function(stream) {12 var pc = new RTCPeerConnection(null);13 pc.addStream(stream);14 var dc = pc.createDataChannel('datachannel');15 dc.onopen = function() {16 pc.close();17 };18 dc.onclose = function() {19 test.done();20 };21 });22}, 'Test 1: Test createDataChannel with getUserMedia');23promise_test(function(t) {24 return navigator.mediaDevices.getUserMedia({audio: true, video: true})25 .then(function(stream) {26 var pc = new RTCPeerConnection(null);27 pc.addStream(stream);28 var dc = pc.createDataChannel('datachannel');29 dc.onopen = function() {30 pc.close();31 };32 dc.onclose = function() {33 test.done();34 };35 });36}, 'Test 2: Test createDataChannel with getUserMedia');37promise_test(function(t) {38 return navigator.mediaDevices.getUserMedia({audio: true, video: true})39 .then(function(stream) {40 var pc = new RTCPeerConnection(null);41 pc.addStream(stream);42 var dc = pc.createDataChannel('datachannel');43 dc.onopen = function() {44 pc.close();45 };46 dc.onclose = function() {47 test.done();48 };49 });50}, 'Test 3: Test createDataChannel with getUserMedia');51promise_test(function(t) {52 return navigator.mediaDevices.getUserMedia({audio: true, video: true})53 .then(function(stream) {54 var pc = new RTCPeerConnection(null);55 pc.addStream(stream);56 var dc = pc.createDataChannel('datachannel');57 dc.onopen = function() {58 pc.close();59 };60 dc.onclose = function() {61 test.done();62 };63 });64}, 'Test 4: Test createDataChannel with getUserMedia');65promise_test(function(t) {66 return navigator.mediaDevices.getUserMedia({audio: true, video: true})
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation test minutes FREE!!