<!DOCTYPE html>
<!--
Create two sources and play them simultaneously. This tests unity-gain summing
of AudioNode inputs.  The result should be the laughter playing at the same
time as the drum loop.
-->
<html>
  <head>
    <title>
      mixing.html
    </title>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
    <script src="resources/audit-util.js"></script>
    <script src="resources/audit.js"></script>
  </head>
  <body>
    <script id="layout-test-code">
      let audit = Audit.createTaskRunner();

      let sampleRate = 44100.0;
      let lengthInSeconds = 2;

      audit.define('test', (task, should) => {
        // Create offline audio context.
        let context = new OfflineAudioContext(
            2, sampleRate * lengthInSeconds, sampleRate);
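        // The context is stereo; the length argument is in sample-frames, so
        // the context renders lengthInSeconds seconds of audio.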

        // Load up audio files and test
        Promise
            .all([
              // This file is stereo
              Audit.loadFileFromUrl('resources/hyper-reality/br-jam-loop.wav')
                  .then(response => {
                    return context.decodeAudioData(response);
                  }),
              // This file is mono
              Audit.loadFileFromUrl('resources/hyper-reality/laughter.wav')
                  .then(response => {
                    return context.decodeAudioData(response);
                  }),
            ])
            .then(audioBuffers => {
              // An SNR threshold of Infinity and an error threshold of 0
              // require the rendered output to match the expected mix exactly.
              return runTest(context, audioBuffers, should, [
                {snrThreshold: Infinity, errorThreshold: 0},
                {snrThreshold: Infinity, errorThreshold: 0}
              ]);
            })
            .then(() => task.done());
      });

      audit.run();
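
      // runTest() connects both decoded buffers to the destination and
      // renders the context.  Because both connections feed the destination's
      // single input, the graph sums them at unity gain, which is the
      // behavior under test.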
      function runTest(context, bufferList, should, testThresholds) {
        should(bufferList.length, 'Number of decoded files').beEqualTo(2);

        // Create two sources and play them at the same time.
        let source1 = context.createBufferSource();
        let source2 = context.createBufferSource();
        source1.buffer = bufferList[0];
        source2.buffer = bufferList[1];

        source1.connect(context.destination);
        source2.connect(context.destination);

        source1.start(0);
        source2.start(0);

        // Verify the number of channels in each source and the expected
        // result.
        should(
            bufferList[0].numberOfChannels,
            'Number of channels in stereo source')
            .beEqualTo(2);
        should(
            bufferList[1].numberOfChannels,
            'Number of channels in mono source')
            .beEqualTo(1);

        return context.startRendering().then(audioBuffer => {
          verifyResult(
              audioBuffer, context, bufferList, testThresholds, should);
        });
      }
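
      // verifyResult() builds the expected stereo mix from the source buffers
      // and compares it, channel by channel, against the rendered output.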
      function verifyResult(
          renderedBuffer, context, bufferList, testThresholds, should) {
        // Test only works if we have a stereo result.
        should(
            renderedBuffer.numberOfChannels,
            'Number of channels in rendered output')
            .beEqualTo(2);

        // Note: the source lengths may not match the context length.  Create
        // copies of the sources truncated or zero-filled to the rendering
        // length.
        let stereoSource = new AudioBuffer({
          length: renderedBuffer.length,
          numberOfChannels: 2,
          sampleRate: context.sampleRate
        });
        stereoSource.copyToChannel(bufferList[0].getChannelData(0), 0);
        stereoSource.copyToChannel(bufferList[0].getChannelData(1), 1);

        let monoSource = new AudioBuffer({
          length: renderedBuffer.length,
          numberOfChannels: 1,
          sampleRate: context.sampleRate
        });
        monoSource.copyToChannel(bufferList[1].getChannelData(0), 0);

        // Compute the expected result.  buffer0 is stereo and buffer1 is
        // mono; the result should be stereo, with the mono source implicitly
        // upmixed to stereo.
        let expectedBuffer = new AudioBuffer({
          length: renderedBuffer.length,
          numberOfChannels: 2,
          sampleRate: context.sampleRate
        });
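
        // Under the default up-mixing rules, the mono connection to the
        // stereo destination places the same signal in both output channels,
        // so each expected channel is stereoData[k] + monoData[k].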
        let monoData = monoSource.getChannelData(0);
        for (let c = 0; c < expectedBuffer.numberOfChannels; ++c) {
          let expectedData = expectedBuffer.getChannelData(c);
          let stereoData = stereoSource.getChannelData(c);
          for (let k = 0; k < expectedBuffer.length; ++k) {
            expectedData[k] = stereoData[k] + monoData[k];
          }
        }

        // Compare the rendered data with the expected data for each channel.
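        // The SNR check (computeSNR() comes from the included audit
        // utilities) measures overall agreement in dB, while beCloseToArray()
        // bounds the largest per-sample deviation; with thresholds of
        // Infinity and 0, both checks effectively require an exact match.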
        for (let k = 0; k < renderedBuffer.numberOfChannels; ++k) {
          let actualData = renderedBuffer.getChannelData(k);
          let expectedData = expectedBuffer.getChannelData(k);
          let threshold = testThresholds[k];

          let snr = 10 * Math.log10(computeSNR(actualData, expectedData));
          should(snr, 'SNR for channel ' + k)
              .beGreaterThanOrEqualTo(threshold.snrThreshold);
          should(actualData, 'Rendered audio').beCloseToArray(expectedData, {
            absoluteThreshold: threshold.errorThreshold
          });
        }
      }
    </script>
  </body>
</html>