Allow an off-by-two pixel error in the detectSignal wpt helper and implement track.stop() for getNoiseStream video tracks.
Differential Revision: https://phabricator.services.mozilla.com/D81584
bugzilla-url: https://bugzilla.mozilla.org/show_bug.cgi?id=1645626
gecko-commit: abccebfeca2298d55b1a76d3780665b1645aa26e
gecko-integration-branch: autoland
gecko-reviewers: bwc
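
For reviewers, a minimal standalone sketch of the two ideas, with hypothetical names (makeCanvasTrack, signalMatches) rather than the actual wpt helpers: a canvas-backed video track whose stop() also tears down the paint timer, and a detection check that tolerates readings within two luminance steps of the expected value.

  // Hypothetical illustration only; the real change is in
  // RTCPeerConnection-helper.js below.
  function makeCanvasTrack({width = 640, height = 480} = {}) {
    const canvas = Object.assign(
      document.createElement("canvas"), {width, height});
    const ctx = canvas.getContext("2d");
    // Repaint every 100 ms so the captured track keeps producing frames.
    const interval = setInterval(() => {
      ctx.fillStyle = "rgb(128, 128, 128)";
      ctx.fillRect(0, 0, width, height);
    }, 100);
    const track = canvas.captureStream().getVideoTracks()[0];
    const nativeStop = track.stop;
    track.stop = function stop() {
      clearInterval(interval); // stop painting once the track is stopped
      nativeStop.apply(this);
    };
    return track;
  }

  // Accept a measured signal within +/- 2 of the expected luminance.
  function signalMatches(signal, value) {
    return value - 2 <= signal && signal <= value + 2;
  }

Overriding stop() on the track instance (rather than on the prototype) keeps the override local to the track the helper hands out.
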
diff --git a/webrtc/RTCPeerConnection-helper.js b/webrtc/RTCPeerConnection-helper.js
index 2d91e5f..54aaa01 100644
--- a/webrtc/RTCPeerConnection-helper.js
+++ b/webrtc/RTCPeerConnection-helper.js
@@ -398,7 +398,7 @@
return dst.stream.getAudioTracks()[0];
},
- video({width = 640, height = 480, signal = null} = {}) {
+ video({width = 640, height = 480, signal} = {}) {
const canvas = Object.assign(
document.createElement("canvas"), {width, height}
);
@@ -406,15 +406,17 @@
const stream = canvas.captureStream();
let count = 0;
- setInterval(() => {
+ const interval = setInterval(() => {
ctx.fillStyle = `rgb(${count%255}, ${count*count%255}, ${count%255})`;
count += 1;
ctx.fillRect(0, 0, width, height);
- // If signal is set, add a constant-color box to the video frame
- // at coordinates 10 to 30 in both X and Y direction.
- if (signal !== null) {
+ // If signal is set (0-255), add a constant-color box of that luminance to
+ // the video frame at coordinates 20 to 60 in both X and Y directions.
+ // (big enough to avoid color bleed from surrounding video in some codecs,
+ // for more stable tests).
+ if (signal != undefined) {
ctx.fillStyle = `rgb(${signal}, ${signal}, ${signal})`;
- ctx.fillRect(10, 10, 20, 20);
+ ctx.fillRect(20, 20, 40, 40);
}
}, 100);
@@ -423,25 +425,34 @@
} else {
document.addEventListener('DOMContentLoaded', () => {
document.body.appendChild(canvas);
- });
+ }, {once: true});
}
- return stream.getVideoTracks()[0];
+ // Implement track.stop() for performance in some tests on some platforms
+ const track = stream.getVideoTracks()[0];
+ const nativeStop = track.stop;
+ track.stop = function stop() {
+ clearInterval(interval);
+ nativeStop.apply(this);
+ if (document.body && canvas.parentElement == document.body) {
+ document.body.removeChild(canvas);
+ }
+ };
+ return track;
}
};
// Get the signal from a video element inserted by createNoiseStream
function getVideoSignal(v) {
- if (v.videoWidth < 21 || v.videoHeight < 21) {
- return null;
+ if (v.videoWidth < 60 || v.videoHeight < 60) {
+ throw new Error('getVideoSignal: video too small for test');
}
const canvas = document.createElement("canvas");
- canvas.width = v.videoWidth;
- canvas.height = v.videoHeight;
+ canvas.width = canvas.height = 60;
const context = canvas.getContext('2d');
- context.drawImage(v, 0, 0, v.videoWidth, v.videoHeight);
- // Extract pixel value at position 20, 20
- const pixel = context.getImageData(20, 20, 1, 1);
+ context.drawImage(v, 0, 0);
+ // Extract pixel value at position 40, 40
+ const pixel = context.getImageData(40, 40, 1, 1);
// Use luma reconstruction to get back original value according to
// ITU-R rec BT.709
return (pixel.data[0] * 0.21 + pixel.data[1] * 0.72 + pixel.data[2] * 0.07);
@@ -449,8 +460,9 @@
async function detectSignal(t, v, value) {
while (true) {
- const signal = getVideoSignal(v);
- if (signal !== null && signal < value + 1 && signal > value - 1) {
+ const signal = getVideoSignal(v).toFixed();
+ // Allow an off-by-two pixel error (observed in some implementations)
+ if (value - 2 <= signal && signal <= value + 2) {
return;
}
// We would like to wait for each new frame instead here,
diff --git a/webrtc/RTCPeerConnection-videoDetectorTest.html b/webrtc/RTCPeerConnection-videoDetectorTest.html
index 71fffdc..6786bd4 100644
--- a/webrtc/RTCPeerConnection-videoDetectorTest.html
+++ b/webrtc/RTCPeerConnection-videoDetectorTest.html
@@ -22,38 +22,38 @@
// the test times out.
async function signalSettlementTime(t, v, sender, signal, backgroundTrack) {
- const detectionStream = await getNoiseStream({video: {signal: signal}});
+ const detectionStream = await getNoiseStream({video: {signal}});
const [detectionTrack] = detectionStream.getTracks();
- await sender.replaceTrack(detectionTrack);
- const framesBefore = v.getVideoPlaybackQuality().totalVideoFrames;
- await detectSignal(t, v, signal);
- const framesAfter = v.getVideoPlaybackQuality().totalVideoFrames;
- await sender.replaceTrack(backgroundTrack);
- await detectSignal(t, v, 100);
- detectionStream.getTracks().forEach(track => track.stop());
- return (framesAfter - framesBefore);
+ try {
+ await sender.replaceTrack(detectionTrack);
+ const framesBefore = v.getVideoPlaybackQuality().totalVideoFrames;
+ await detectSignal(t, v, signal);
+ const framesAfter = v.getVideoPlaybackQuality().totalVideoFrames;
+ await sender.replaceTrack(backgroundTrack);
+ await detectSignal(t, v, 100);
+ return framesAfter - framesBefore;
+ } finally {
+ detectionTrack.stop();
+ }
}
promise_test(async t => {
const v = document.createElement('video');
v.autoplay = true;
const pc1 = new RTCPeerConnection();
- t.add_cleanup(() => pc1.close());
const pc2 = new RTCPeerConnection();
+ t.add_cleanup(() => pc1.close());
t.add_cleanup(() => pc2.close());
const stream1 = await getNoiseStream({video: {signal: 100}});
- t.add_cleanup(() => stream1.getTracks().forEach(track => track.stop()));
const [track1] = stream1.getTracks();
+ t.add_cleanup(() => track1.stop());
+
const sender = pc1.addTrack(track1);
- pc2.ontrack = t.step_func((e) => {
- v.srcObject = new MediaStream([e.track]);
- });
- const metadataToBeLoaded = new Promise((resolve) => {
- v.addEventListener('loadedmetadata', resolve);
- });
+ const haveTrackEvent = new Promise(r => pc2.ontrack = r);
exchangeIceCandidates(pc1, pc2);
- exchangeOfferAnswer(pc1, pc2);
- await metadataToBeLoaded;
+ await exchangeOfferAnswer(pc1, pc2);
+ v.srcObject = new MediaStream([(await haveTrackEvent).track]);
+ await new Promise(r => v.onloadedmetadata = r);
// The basic signal is a track with signal 100. We replace this
// with tracks with signal from 0 to 255 and see if they are all
// reliably detected.