// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/filters/chunk_demuxer.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <utility>
#include "base/bind.h"
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/mock_media_log.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
#include "media/base/timestamp_constants.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_cluster_parser.h"
#include "media/formats/webm/webm_constants.h"
#include "media/media_features.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AnyNumber;
using ::testing::Exactly;
using ::testing::InSequence;
using ::testing::NotNull;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArgumentPointee;
using ::testing::StrictMock;
using ::testing::_;
namespace media {
const uint8_t kTracksHeader[] = {
0x16, 0x54, 0xAE, 0x6B, // Tracks ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
};
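// The 8-byte size field above (beginning at kTracksSizeOffset) is a
// placeholder; CreateInitSegmentInternal() overwrites it via WriteInt64()
// with the actual Tracks payload size.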
// WebM Block bytes that represent a VP8 key frame.
const uint8_t kVP8Keyframe[] = {0x10, 0x00, 0x00, 0x9d, 0x01, 0x2a,
0x00, 0x10, 0x00, 0x10, 0x00};
// WebM Block bytes that represent a VP8 interframe.
const uint8_t kVP8Interframe[] = {0x11, 0x00, 0x00};
const uint8_t kCuesHeader[] = {
0x1C, 0x53, 0xBB, 0x6B, // Cues ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
};
const uint8_t kEncryptedMediaInitData[] = {
0x68, 0xFE, 0xF9, 0xA1, 0xB3, 0x0D, 0x6B, 0x4D,
0xF2, 0x22, 0xB5, 0x0B, 0x4D, 0xE9, 0xE9, 0x95,
};
const int kTracksHeaderSize = sizeof(kTracksHeader);
const int kTracksSizeOffset = 4;
// The size of TrackEntry element in test file "webm_vorbis_track_entry" starts
// at index 1 and spans 8 bytes.
const int kAudioTrackSizeOffset = 1;
const int kAudioTrackSizeWidth = 8;
const int kAudioTrackEntryHeaderSize =
kAudioTrackSizeOffset + kAudioTrackSizeWidth;
// The size of TrackEntry element in test file "webm_vp8_track_entry" starts at
// index 1 and spans 8 bytes.
const int kVideoTrackSizeOffset = 1;
const int kVideoTrackSizeWidth = 8;
const int kVideoTrackEntryHeaderSize =
kVideoTrackSizeOffset + kVideoTrackSizeWidth;
const int kVideoTrackNum = 1;
const int kAudioTrackNum = 2;
const int kTextTrackNum = 3;
const int kAlternateTextTrackNum = 4;
const int kAudioBlockDuration = 23;
const int kVideoBlockDuration = 33;
const int kTextBlockDuration = 100;
const int kBlockSize = 10;
const char kSourceId[] = "SourceId";
const char kDefaultFirstClusterRange[] = "{ [0,46) }";
const int kDefaultFirstClusterEndTimestamp = 66;
const int kDefaultSecondClusterEndTimestamp = 132;
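// These values follow from GenerateCluster(): audio blocks last 23ms and
// video blocks 33ms, so the default first cluster buffers audio over [0,46)
// and video over [0,66). The reported range is their intersection, and each
// default cluster's end timestamp is its video track's end time.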
base::TimeDelta kDefaultDuration() {
return base::TimeDelta::FromMilliseconds(201224);
}
// Writes an integer into |buffer| in the form of a vint that spans 8 bytes.
// The data pointed to by |buffer| must be at least 8 bytes long.
// |number| must be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
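// For example, WriteInt64(buffer, 0x1234) produces the bytes
// 01 00 00 00 00 00 12 34.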
static void WriteInt64(uint8_t* buffer, int64_t number) {
DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
buffer[0] = 0x01;
int64_t tmp = number;
for (int i = 7; i > 0; i--) {
buffer[i] = tmp & 0xff;
tmp >>= 8;
}
}
MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
return arg.get() && !arg->end_of_stream() &&
arg->timestamp().InMilliseconds() == timestamp_in_ms;
}
MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
MATCHER_P(SegmentMissingFrames, frame_types_string, "") {
return CONTAINS_STRING(arg, "Media segment did not contain any " +
std::string(frame_types_string) +
" coded frames, mismatching initialization "
"segment. Therefore, MSE coded frame "
"processing may not interoperably detect "
"discontinuities in appended media.");
}
MATCHER(StreamParsingFailed, "") {
return CONTAINS_STRING(arg, "Append: stream parsing failed.");
}
MATCHER_P(FoundStream, stream_type_string, "") {
return CONTAINS_STRING(
arg, "found_" + std::string(stream_type_string) + "_stream") &&
CONTAINS_STRING(arg, "true");
}
MATCHER_P2(CodecName, stream_type_string, codec_string, "") {
return CONTAINS_STRING(arg,
std::string(stream_type_string) + "_codec_name") &&
CONTAINS_STRING(arg, std::string(codec_string));
}
MATCHER_P2(InitSegmentMismatchesMimeType,
track_type_string_with_article,
mime_missing_track_type_bool,
"") {
return CONTAINS_STRING(
arg, "Initialization segment " +
std::string(mime_missing_track_type_bool ? "has "
: "does not have ") +
std::string(track_type_string_with_article) +
" track, but the mimetype " +
std::string(mime_missing_track_type_bool ? "does not specify "
: "specifies ") +
std::string(track_type_string_with_article) + " codec.");
}
MATCHER_P2(GeneratedSplice, duration_microseconds, time_microseconds, "") {
return CONTAINS_STRING(arg, "Generated splice of overlap duration " +
base::IntToString(duration_microseconds) +
"us into new buffer at " +
base::IntToString(time_microseconds) + "us.");
}
MATCHER_P2(SkippingSpliceAtOrBefore,
new_microseconds,
existing_microseconds,
"") {
return CONTAINS_STRING(
arg, "Skipping splice frame generation: first new buffer at " +
base::IntToString(new_microseconds) +
"us begins at or before existing buffer at " +
base::IntToString(existing_microseconds) + "us.");
}
MATCHER_P(SkippingSpliceAlreadySpliced, time_microseconds, "") {
return CONTAINS_STRING(
arg, "Skipping splice frame generation: overlapped buffers at " +
base::IntToString(time_microseconds) +
"us are in a previously buffered splice.");
}
MATCHER_P(WebMSimpleBlockDurationEstimated, estimated_duration_ms, "") {
return CONTAINS_STRING(arg, "Estimating WebM block duration to be " +
base::IntToString(estimated_duration_ms) +
"ms for the last (Simple)Block in the "
"Cluster for this Track. Use BlockGroups "
"with BlockDurations at the end of each "
"Track in a Cluster to avoid estimation.");
}
MATCHER_P(WebMNegativeTimecodeOffset, timecode_string, "") {
return CONTAINS_STRING(arg, "Got a block with negative timecode offset " +
std::string(timecode_string));
}
MATCHER(WebMOutOfOrderTimecode, "") {
return CONTAINS_STRING(
arg, "Got a block with a timecode before the previous block.");
}
MATCHER(WebMClusterBeforeFirstInfo, "") {
return CONTAINS_STRING(arg, "Found Cluster element before Info.");
}
static void OnReadDone(const base::TimeDelta& expected_time,
bool* called,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kOk);
EXPECT_EQ(expected_time, buffer->timestamp());
*called = true;
}
static void OnReadDone_AbortExpected(
bool* called, DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kAborted);
EXPECT_EQ(NULL, buffer.get());
*called = true;
}
static void OnReadDone_EOSExpected(bool* called,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kOk);
EXPECT_TRUE(buffer->end_of_stream());
*called = true;
}
static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
EXPECT_EQ(status, PIPELINE_OK);
*called = true;
}
class ChunkDemuxerTest : public ::testing::Test {
protected:
enum CodecsIndex {
AUDIO,
VIDEO,
MAX_CODECS_INDEX
};
// Default cluster to append first for simple tests.
scoped_ptr<Cluster> kDefaultFirstCluster() {
return GenerateCluster(0, 4);
}
// Default cluster to append after kDefaultFirstCluster()
// has been appended. This cluster starts with blocks that
// have timestamps consistent with the end times of the blocks
// in kDefaultFirstCluster() so that these two clusters represent
// a continuous region.
scoped_ptr<Cluster> kDefaultSecondCluster() {
return GenerateCluster(46, 66, 5);
}
ChunkDemuxerTest()
: media_log_(new StrictMock<MockMediaLog>()),
append_window_end_for_next_append_(kInfiniteDuration()) {
init_segment_received_cb_ =
base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
base::Unretained(this));
CreateNewDemuxer();
}
void CreateNewDemuxer() {
base::Closure open_cb =
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
&ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
demuxer_.reset(new ChunkDemuxer(open_cb, encrypted_media_init_data_cb,
media_log_, true));
}
virtual ~ChunkDemuxerTest() {
ShutdownDemuxer();
}
void CreateInitSegment(int stream_flags,
bool is_audio_encrypted,
bool is_video_encrypted,
scoped_ptr<uint8_t[]>* buffer,
int* size) {
CreateInitSegmentInternal(
stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
size);
}
void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
bool is_audio_encrypted,
bool is_video_encrypted,
scoped_ptr<uint8_t[]>* buffer,
int* size) {
DCHECK(stream_flags & HAS_TEXT);
CreateInitSegmentInternal(
stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
size);
}
void CreateInitSegmentInternal(int stream_flags,
bool is_audio_encrypted,
bool is_video_encrypted,
scoped_ptr<uint8_t[]>* buffer,
bool use_alternate_text_track_id,
int* size) {
bool has_audio = (stream_flags & HAS_AUDIO) != 0;
bool has_video = (stream_flags & HAS_VIDEO) != 0;
bool has_text = (stream_flags & HAS_TEXT) != 0;
scoped_refptr<DecoderBuffer> ebml_header;
scoped_refptr<DecoderBuffer> info;
scoped_refptr<DecoderBuffer> audio_track_entry;
scoped_refptr<DecoderBuffer> video_track_entry;
scoped_refptr<DecoderBuffer> audio_content_encodings;
scoped_refptr<DecoderBuffer> video_content_encodings;
scoped_refptr<DecoderBuffer> text_track_entry;
ebml_header = ReadTestDataFile("webm_ebml_element");
info = ReadTestDataFile("webm_info_element");
int tracks_element_size = 0;
if (has_audio) {
audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
tracks_element_size += audio_track_entry->data_size();
if (is_audio_encrypted) {
audio_content_encodings = ReadTestDataFile("webm_content_encodings");
tracks_element_size += audio_content_encodings->data_size();
}
}
if (has_video) {
video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
tracks_element_size += video_track_entry->data_size();
if (is_video_encrypted) {
video_content_encodings = ReadTestDataFile("webm_content_encodings");
tracks_element_size += video_content_encodings->data_size();
}
}
if (has_text) {
// TODO(matthewjheaney): create an abstraction to do
// this (http://crbug/321454).
// We need it to also handle the creation of multiple text tracks.
//
// This is the track entry for a text track,
// TrackEntry [AE], size=30
// TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
// TrackUID [73] [C5], size=1, value=3 (must remain constant for same
// track, even if TrackNum changes)
// TrackType [83], size=1, val=0x11
// CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
"\x83\x81\x11\x86\x92"
"D_WEBVTT/SUBTITLES";
DCHECK_EQ(str[4], kTextTrackNum);
if (use_alternate_text_track_id)
str[4] = kAlternateTextTrackNum;
const int len = strlen(str);
DCHECK_EQ(len, 32);
const uint8_t* const buf = reinterpret_cast<const uint8_t*>(str);
text_track_entry = DecoderBuffer::CopyFrom(buf, len);
tracks_element_size += text_track_entry->data_size();
}
*size = ebml_header->data_size() + info->data_size() +
kTracksHeaderSize + tracks_element_size;
buffer->reset(new uint8_t[*size]);
uint8_t* buf = buffer->get();
memcpy(buf, ebml_header->data(), ebml_header->data_size());
buf += ebml_header->data_size();
memcpy(buf, info->data(), info->data_size());
buf += info->data_size();
memcpy(buf, kTracksHeader, kTracksHeaderSize);
WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
buf += kTracksHeaderSize;
// TODO(xhwang): Simplify this! Probably have test data files that contain
// ContentEncodings directly instead of trying to create one at run-time.
if (has_audio) {
memcpy(buf, audio_track_entry->data(),
audio_track_entry->data_size());
if (is_audio_encrypted) {
memcpy(buf + audio_track_entry->data_size(),
audio_content_encodings->data(),
audio_content_encodings->data_size());
WriteInt64(buf + kAudioTrackSizeOffset,
audio_track_entry->data_size() +
audio_content_encodings->data_size() -
kAudioTrackEntryHeaderSize);
buf += audio_content_encodings->data_size();
}
buf += audio_track_entry->data_size();
}
if (has_video) {
memcpy(buf, video_track_entry->data(),
video_track_entry->data_size());
if (is_video_encrypted) {
memcpy(buf + video_track_entry->data_size(),
video_content_encodings->data(),
video_content_encodings->data_size());
WriteInt64(buf + kVideoTrackSizeOffset,
video_track_entry->data_size() +
video_content_encodings->data_size() -
kVideoTrackEntryHeaderSize);
buf += video_content_encodings->data_size();
}
buf += video_track_entry->data_size();
}
if (has_text) {
memcpy(buf, text_track_entry->data(),
text_track_entry->data_size());
buf += text_track_entry->data_size();
}
}
ChunkDemuxer::Status AddId() {
return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
}
ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
bool has_audio = (stream_flags & HAS_AUDIO) != 0;
bool has_video = (stream_flags & HAS_VIDEO) != 0;
std::vector<std::string> codecs;
std::string type;
if (has_audio) {
codecs.push_back("vorbis");
type = "audio/webm";
}
if (has_video) {
codecs.push_back("vp8");
type = "video/webm";
}
if (!has_audio && !has_video) {
return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
}
return demuxer_->AddId(source_id, type, codecs);
}
#if BUILDFLAG(ENABLE_MSE_MPEG2TS_STREAM_PARSER)
ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
std::vector<std::string> codecs;
std::string type = "video/mp2t";
codecs.push_back("mp4a.40.2");
codecs.push_back("avc1.640028");
return demuxer_->AddId(source_id, type, codecs);
}
#endif
void AppendData(const uint8_t* data, size_t length) {
AppendData(kSourceId, data, length);
}
void AppendCluster(const std::string& source_id,
scoped_ptr<Cluster> cluster) {
AppendData(source_id, cluster->data(), cluster->size());
}
void AppendCluster(scoped_ptr<Cluster> cluster) {
AppendCluster(kSourceId, std::move(cluster));
}
void AppendCluster(int timecode, int block_count) {
AppendCluster(GenerateCluster(timecode, block_count));
}
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
int timecode, int block_count) {
int block_duration = 0;
switch (track_number) {
case kVideoTrackNum:
block_duration = kVideoBlockDuration;
break;
case kAudioTrackNum:
block_duration = kAudioBlockDuration;
break;
case kTextTrackNum: // Fall-through.
case kAlternateTextTrackNum:
block_duration = kTextBlockDuration;
break;
}
ASSERT_NE(block_duration, 0);
int end_timecode = timecode + block_count * block_duration;
AppendCluster(source_id,
GenerateSingleStreamCluster(
timecode, end_timecode, track_number, block_duration));
}
struct BlockInfo {
BlockInfo()
: track_number(0),
timestamp_in_ms(0),
flags(0),
duration(0) {
}
BlockInfo(int tn, int ts, int f, int d)
: track_number(tn),
timestamp_in_ms(ts),
flags(f),
duration(d) {
}
int track_number;
int timestamp_in_ms;
int flags;
int duration;
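// Orders blocks by timestamp so that a std::priority_queue<BlockInfo> can
// be drained (largest first) into an ascending list; see GenerateCluster().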
bool operator< (const BlockInfo& rhs) const {
return timestamp_in_ms < rhs.timestamp_in_ms;
}
};
// |track_number| - The track number to assign to each parsed block.
// |block_descriptions| - A space-delimited string of block info used to
// populate |blocks|. Each block info is a timestamp in milliseconds,
// optionally followed by a 'K' to indicate that the block should be marked
// as a key frame. For example "0K 30 60" populates |blocks| with 3 BlockInfo
// objects: a key frame with timestamp 0 and 2 non-key-frames at 30ms and
// 60ms.
// Every block will be a SimpleBlock, except that the last block may carry an
// explicit duration, delimited with a 'D' and appended to the timestamp
// before the optional keyframe 'K'. For example "0K 30 60D10K" indicates
// that the last block will be a keyframe BlockGroup with duration 10ms.
void ParseBlockDescriptions(int track_number,
const std::string& block_descriptions,
std::vector<BlockInfo>* blocks) {
std::vector<std::string> timestamps = base::SplitString(
block_descriptions, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
for (size_t i = 0; i < timestamps.size(); ++i) {
std::string timestamp_str = timestamps[i];
BlockInfo block_info;
block_info.track_number = track_number;
block_info.flags = 0;
block_info.duration = 0;
if (base::EndsWith(timestamp_str, "K", base::CompareCase::SENSITIVE)) {
block_info.flags = kWebMFlagKeyframe;
// Remove the "K" off of the token.
timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
}
size_t duration_pos = timestamp_str.find('D');
const bool explicit_duration = duration_pos != std::string::npos;
const bool is_last_block = i == timestamps.size() - 1;
CHECK(!explicit_duration || is_last_block);
if (explicit_duration) {
CHECK(base::StringToInt(timestamp_str.substr(duration_pos + 1),
&block_info.duration));
timestamp_str = timestamp_str.substr(0, duration_pos);
}
CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
if (track_number == kTextTrackNum ||
track_number == kAlternateTextTrackNum) {
block_info.duration = kTextBlockDuration;
ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
<< "Text block with timestamp " << block_info.timestamp_in_ms
<< " was not marked as a key frame."
<< " All text blocks must be key frames";
}
if (track_number == kAudioTrackNum)
ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
blocks->push_back(block_info);
}
}
scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
bool unknown_size) {
DCHECK_GT(blocks.size(), 0u);
ClusterBuilder cb;
std::vector<uint8_t> data(10);
for (size_t i = 0; i < blocks.size(); ++i) {
if (i == 0)
cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
if (blocks[i].duration) {
if (blocks[i].track_number == kVideoTrackNum) {
AddVideoBlockGroup(&cb,
blocks[i].track_number, blocks[i].timestamp_in_ms,
blocks[i].duration, blocks[i].flags);
} else {
cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
blocks[i].duration, blocks[i].flags,
&data[0], data.size());
}
} else {
cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
blocks[i].flags,
&data[0], data.size());
}
}
return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
}
scoped_ptr<Cluster> GenerateCluster(
std::priority_queue<BlockInfo> block_queue,
bool unknown_size) {
std::vector<BlockInfo> blocks(block_queue.size());
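// The priority queue pops the largest timestamp first, so fill |blocks|
// from the back to produce ascending timestamp order.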
for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
blocks[i] = block_queue.top();
block_queue.pop();
}
return GenerateCluster(blocks, unknown_size);
}
// |block_descriptions| - The block descriptions used to construct the
// cluster. See the documentation for ParseBlockDescriptions() for details on
// the string format.
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
const std::string& block_descriptions) {
std::vector<BlockInfo> blocks;
ParseBlockDescriptions(track_number, block_descriptions, &blocks);
AppendCluster(source_id, GenerateCluster(blocks, false));
}
struct MuxedStreamInfo {
MuxedStreamInfo()
: track_number(0),
block_descriptions(""),
last_blocks_estimated_duration(-1) {}
MuxedStreamInfo(int track_num, const char* block_desc)
: track_number(track_num),
block_descriptions(block_desc),
last_blocks_estimated_duration(-1) {}
MuxedStreamInfo(int track_num,
const char* block_desc,
int last_block_duration_estimate)
: track_number(track_num),
block_descriptions(block_desc),
last_blocks_estimated_duration(last_block_duration_estimate) {}
int track_number;
// The block description passed to ParseBlockDescriptions().
// See the documentation for that method for details on the string format.
const char* block_descriptions;
// If -1, no WebMSimpleBlockDurationEstimated MediaLog expectation is added
// when appending the resulting cluster. Otherwise, an expectation (in ms)
// is added.
int last_blocks_estimated_duration;
};
void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
const MuxedStreamInfo& msi_2) {
std::vector<MuxedStreamInfo> msi(2);
msi[0] = msi_1;
msi[1] = msi_2;
AppendMuxedCluster(msi);
}
void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
const MuxedStreamInfo& msi_2,
const MuxedStreamInfo& msi_3) {
std::vector<MuxedStreamInfo> msi(3);
msi[0] = msi_1;
msi[1] = msi_2;
msi[2] = msi_3;
AppendMuxedCluster(msi);
}
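// Merges the block descriptions of each stream in |msi| into a single
// cluster with blocks ordered by timestamp, and registers a
// WebMSimpleBlockDurationEstimated media log expectation for each stream
// that requests one.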
scoped_ptr<Cluster> GenerateMuxedCluster(
const std::vector<MuxedStreamInfo>& msi) {
std::priority_queue<BlockInfo> block_queue;
for (size_t i = 0; i < msi.size(); ++i) {
std::vector<BlockInfo> track_blocks;
ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
&track_blocks);
for (size_t j = 0; j < track_blocks.size(); ++j) {
block_queue.push(track_blocks[j]);
}
if (msi[i].last_blocks_estimated_duration != -1) {
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(
msi[i].last_blocks_estimated_duration));
}
}
return GenerateCluster(block_queue, false);
}
void AppendMuxedCluster(const std::vector<MuxedStreamInfo>& msi) {
AppendCluster(kSourceId, GenerateMuxedCluster(msi));
}
void AppendData(const std::string& source_id,
const uint8_t* data,
size_t length) {
EXPECT_CALL(host_, OnBufferedTimeRangesChanged(_)).Times(AnyNumber());
demuxer_->AppendData(source_id, data, length,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[source_id],
init_segment_received_cb_);
}
void AppendDataInPieces(const uint8_t* data, size_t length) {
AppendDataInPieces(data, length, 7);
}
void AppendDataInPieces(const uint8_t* data,
size_t length,
size_t piece_size) {
const uint8_t* start = data;
const uint8_t* end = data + length;
while (start < end) {
size_t append_size = std::min(piece_size,
static_cast<size_t>(end - start));
AppendData(start, append_size);
start += append_size;
}
}
void AppendInitSegment(int stream_flags) {
AppendInitSegmentWithSourceId(kSourceId, stream_flags);
}
void AppendInitSegmentWithSourceId(const std::string& source_id,
int stream_flags) {
AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
}
void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
int stream_flags,
bool is_audio_encrypted,
bool is_video_encrypted) {
scoped_ptr<uint8_t[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(stream_flags,
is_audio_encrypted, is_video_encrypted,
&info_tracks, &info_tracks_size);
AppendData(source_id, info_tracks.get(), info_tracks_size);
}
void AppendGarbage() {
// Fill up an array with gibberish.
int garbage_cluster_size = 10;
scoped_ptr<uint8_t[]> garbage_cluster(new uint8_t[garbage_cluster_size]);
for (int i = 0; i < garbage_cluster_size; ++i)
garbage_cluster[i] = i;
AppendData(garbage_cluster.get(), garbage_cluster_size);
}
void InitDoneCalled(PipelineStatus expected_status,
PipelineStatus status) {
EXPECT_EQ(status, expected_status);
}
PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
PipelineStatus expected_status) {
if (expected_duration != kNoTimestamp())
EXPECT_CALL(host_, SetDuration(expected_duration));
return CreateInitDoneCB(expected_status);
}
PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
base::Unretained(this),
expected_status);
}
enum StreamFlags {
HAS_AUDIO = 1 << 0,
HAS_VIDEO = 1 << 1,
HAS_TEXT = 1 << 2
};
bool InitDemuxer(int stream_flags) {
return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
}
void ExpectInitMediaLogs(int stream_flags) {
if (stream_flags & HAS_AUDIO) {
EXPECT_MEDIA_LOG(FoundStream("audio"));
EXPECT_MEDIA_LOG(CodecName("audio", "vorbis"));
}
if (stream_flags & HAS_VIDEO) {
EXPECT_MEDIA_LOG(FoundStream("video"));
EXPECT_MEDIA_LOG(CodecName("video", "vp8"));
}
}
bool InitDemuxerWithEncryptionInfo(
int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
PipelineStatus expected_status =
(stream_flags != 0) ? PIPELINE_OK : PIPELINE_ERROR_DECODE;
base::TimeDelta expected_duration = kNoTimestamp();
if (expected_status == PIPELINE_OK)
expected_duration = kDefaultDuration();
EXPECT_CALL(*this, DemuxerOpened());
if (is_audio_encrypted || is_video_encrypted) {
DCHECK(!is_audio_encrypted || stream_flags & HAS_AUDIO);
DCHECK(!is_video_encrypted || stream_flags & HAS_VIDEO);
int need_key_count =
(is_audio_encrypted ? 1 : 0) + (is_video_encrypted ? 1 : 0);
EXPECT_CALL(*this, OnEncryptedMediaInitData(
EmeInitDataType::WEBM,
std::vector<uint8_t>(
kEncryptedMediaInitData,
kEncryptedMediaInitData +
arraysize(kEncryptedMediaInitData))))
.Times(Exactly(need_key_count));
}
// Adding expectations prior to CreateInitDoneCB() here because InSequence
// tests require init segment received before duration set. Also, only
// expect an init segment received callback if there is actually a track in
// it.
if (stream_flags != 0) {
ExpectInitMediaLogs(stream_flags);
EXPECT_CALL(*this, InitSegmentReceived());
} else {
// OnNewConfigs() requires at least one audio, video, or text track.
EXPECT_MEDIA_LOG(StreamParsingFailed());
}
demuxer_->Initialize(
&host_, CreateInitDoneCB(expected_duration, expected_status), true);
if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
return false;
AppendInitSegmentWithEncryptedInfo(
kSourceId, stream_flags,
is_audio_encrypted, is_video_encrypted);
return true;
}
bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
const std::string& video_id,
bool has_text) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
return false;
if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
return false;
int audio_flags = HAS_AUDIO;
int video_flags = HAS_VIDEO;
if (has_text) {
audio_flags |= HAS_TEXT;
video_flags |= HAS_TEXT;
}
// Note: Unlike InitDemuxerWithEncryptionInfo, this method is currently
// incompatible with InSequence tests. Refactoring of the duration
// set expectation to not be added during CreateInitDoneCB() could fix this.
ExpectInitMediaLogs(audio_flags);
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(audio_id, audio_flags);
ExpectInitMediaLogs(video_flags);
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(video_id, video_flags);
return true;
}
bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
const std::string& video_id) {
return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
}
// Initializes the demuxer with data from 2 files with different
// decoder configurations. This is used to test the decoder config change
// logic.
//
// bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
// bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
// The resulting video stream returns data from each file for the following
// time ranges.
// bear-320x240.webm : [0-501) [801-2736)
// bear-640x360.webm : [527-793)
//
// bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data size.
// bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data size.
// The resulting audio stream returns data from each file for the following
// time ranges.
// bear-320x240.webm : [0-524) [779-2736)
// bear-640x360.webm : [527-759)
bool InitDemuxerWithConfigChangeData() {
scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
EXPECT_CALL(*this, DemuxerOpened());
// Adding expectation prior to CreateInitDoneCB() here because InSequence
// tests require init segment received before duration set.
ExpectInitMediaLogs(HAS_AUDIO | HAS_VIDEO);
EXPECT_CALL(*this, InitSegmentReceived());
demuxer_->Initialize(
&host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
PIPELINE_OK), true);
if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
return false;
// Append the whole bear1 file.
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(2)).Times(7);
// Expect duration adjustment since actual duration differs slightly from
// duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
AppendData(bear1->data(), bear1->data_size());
// Last audio frame has timestamp 2721 and duration 24 (estimated from max
// seen so far for audio track).
// Last video frame has timestamp 2703 and duration 33 (from TrackEntry
// DefaultDuration for video track).
CheckExpectedRanges("{ [0,2736) }");
// Append initialization segment for bear2.
// Note: Offsets here and below are derived from
// media/test/data/bear-640x360-manifest.js and
// media/test/data/bear-320x240-manifest.js which were
// generated from media/test/data/bear-640x360.webm and
// media/test/data/bear-320x240.webm respectively.
EXPECT_CALL(*this, InitSegmentReceived());
AppendData(bear2->data(), 4340);
// Append a media segment that goes from [0.527000, 1.014000).
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(2));
EXPECT_MEDIA_LOG(GeneratedSplice(20000, 527000));
AppendData(bear2->data() + 55290, 18785);
CheckExpectedRanges("{ [0,1027) [1201,2736) }");
// Append initialization segment for bear1 & fill gap with [779-1197)
// segment.
EXPECT_CALL(*this, InitSegmentReceived());
AppendData(bear1->data(), 4370);
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(23));
EXPECT_MEDIA_LOG(GeneratedSplice(26000, 779000));
AppendData(bear1->data() + 72737, 28183);
CheckExpectedRanges("{ [0,2736) }");
MarkEndOfStream(PIPELINE_OK);
return true;
}
void ShutdownDemuxer() {
if (demuxer_) {
demuxer_->Shutdown();
message_loop_.RunUntilIdle();
}
}
void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64_t timecode) {
uint8_t data[] = {0x00};
cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
}
scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
return GenerateCluster(timecode, timecode, block_count);
}
void AddVideoBlockGroup(ClusterBuilder* cb,
int track_num,
int64_t timecode,
int duration,
int flags) {
const uint8_t* data =
(flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
sizeof(kVP8Interframe);
cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
}
scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
int first_video_timecode,
int block_count) {
return GenerateCluster(first_audio_timecode, first_video_timecode,
block_count, false);
}
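// Generates a cluster of |block_count| interleaved audio and video blocks
// starting at the given timecodes. All blocks are SimpleBlocks except the
// last audio and video blocks, which are emitted as BlockGroups with
// explicit durations.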
scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
int first_video_timecode,
int block_count,
bool unknown_size) {
CHECK_GT(block_count, 0);
std::priority_queue<BlockInfo> block_queue;
if (block_count == 1) {
block_queue.push(BlockInfo(kAudioTrackNum,
first_audio_timecode,
kWebMFlagKeyframe,
kAudioBlockDuration));
return GenerateCluster(block_queue, unknown_size);
}
int audio_timecode = first_audio_timecode;
int video_timecode = first_video_timecode;
// Create simple blocks for everything except the last 2 blocks.
// The first video frame must be a key frame.
uint8_t video_flag = kWebMFlagKeyframe;
for (int i = 0; i < block_count - 2; i++) {
if (audio_timecode <= video_timecode) {
block_queue.push(BlockInfo(kAudioTrackNum,
audio_timecode,
kWebMFlagKeyframe,
0));
audio_timecode += kAudioBlockDuration;
continue;
}
block_queue.push(BlockInfo(kVideoTrackNum,
video_timecode,
video_flag,
0));
video_timecode += kVideoBlockDuration;
video_flag = 0;
}
// Make the last 2 blocks BlockGroups so that they don't get delayed by the
// block duration calculation logic.
block_queue.push(BlockInfo(kAudioTrackNum,
audio_timecode,
kWebMFlagKeyframe,
kAudioBlockDuration));
block_queue.push(BlockInfo(kVideoTrackNum,
video_timecode,
video_flag,
kVideoBlockDuration));
return GenerateCluster(block_queue, unknown_size);
}
scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
int end_timecode,
int track_number,
int block_duration) {
CHECK_GT(end_timecode, timecode);
std::vector<uint8_t> data(kBlockSize);
ClusterBuilder cb;
cb.SetClusterTimecode(timecode);
// Create simple blocks for everything except the last block.
while (timecode < (end_timecode - block_duration)) {
cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
&data[0], data.size());
timecode += block_duration;
}
if (track_number == kVideoTrackNum) {
AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
kWebMFlagKeyframe);
} else {
cb.AddBlockGroup(track_number, timecode, block_duration,
kWebMFlagKeyframe, &data[0], data.size());
}
return cb.Finish();
}
void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
demuxer_->GetStream(type)->Read(read_cb);
message_loop_.RunUntilIdle();
}
void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
Read(DemuxerStream::AUDIO, read_cb);
}
void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
Read(DemuxerStream::VIDEO, read_cb);
}
void GenerateExpectedReads(int timecode, int block_count) {
GenerateExpectedReads(timecode, timecode, block_count);
}
void GenerateExpectedReads(int start_audio_timecode,
int start_video_timecode,
int block_count) {
CHECK_GT(block_count, 0);
if (block_count == 1) {
ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
return;
}
int audio_timecode = start_audio_timecode;
int video_timecode = start_video_timecode;
for (int i = 0; i < block_count; i++) {
if (audio_timecode <= video_timecode) {
ExpectRead(DemuxerStream::AUDIO, audio_timecode);
audio_timecode += kAudioBlockDuration;
continue;
}
ExpectRead(DemuxerStream::VIDEO, video_timecode);
video_timecode += kVideoBlockDuration;
}
}
void GenerateSingleStreamExpectedReads(int timecode,
int block_count,
DemuxerStream::Type type,
int block_duration) {
CHECK_GT(block_count, 0);
int stream_timecode = timecode;
for (int i = 0; i < block_count; i++) {
ExpectRead(type, stream_timecode);
stream_timecode += block_duration;
}
}
void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
GenerateSingleStreamExpectedReads(
timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
}
void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
GenerateSingleStreamExpectedReads(
timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
}
scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
ClusterBuilder cb;
cb.SetClusterTimecode(timecode);
return cb.Finish();
}
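// The CheckExpectedRanges() helpers below verify buffered ranges against an
// expected string of the form "{ [0,46) [70,90) }", with times in
// milliseconds.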
void CheckExpectedRangesForMediaSource(const std::string& expected) {
CheckExpectedRanges(demuxer_->GetBufferedRanges(), expected);
}
void CheckExpectedRanges(const std::string& expected) {
CheckExpectedRanges(kSourceId, expected);
CheckExpectedRangesForMediaSource(expected);
}
void CheckExpectedRanges(const std::string& id, const std::string& expected) {
CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
}
void CheckExpectedRanges(DemuxerStream::Type type,
const std::string& expected) {
ChunkDemuxerStream* stream =
static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
expected);
}
void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
const std::string& expected) {
std::stringstream ss;
ss << "{ ";
for (size_t i = 0; i < r.size(); ++i) {
ss << "[" << r.start(i).InMilliseconds() << ","
<< r.end(i).InMilliseconds() << ") ";
}
ss << "}";
EXPECT_EQ(expected, ss.str());
}
MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>&));
void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
scoped_refptr<DecoderBuffer>* buffer_out,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
*status_out = status;
*buffer_out = buffer;
}
void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
DemuxerStream::Status* status,
base::TimeDelta* last_timestamp) {
DemuxerStream* stream = demuxer_->GetStream(type);
scoped_refptr<DecoderBuffer> buffer;
*last_timestamp = kNoTimestamp();
do {
stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
base::Unretained(this), status, &buffer));
base::MessageLoop::current()->RunUntilIdle();
if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
*last_timestamp = buffer->timestamp();
} while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
}
void ExpectEndOfStream(DemuxerStream::Type type) {
EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
demuxer_->GetStream(type)->Read(base::Bind(
&ChunkDemuxerTest::ReadDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
void ExpectRead(DemuxerStream::Type type, int64_t timestamp_in_ms) {
EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
HasTimestamp(timestamp_in_ms)));
demuxer_->GetStream(type)->Read(base::Bind(
&ChunkDemuxerTest::ReadDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
void ExpectConfigChanged(DemuxerStream::Type type) {
EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
demuxer_->GetStream(type)->Read(base::Bind(
&ChunkDemuxerTest::ReadDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
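// Reads buffers from |stream| until end of stream or a non-kOk status,
// building a space-delimited string of their timestamps in milliseconds
// with 'K' appended for key frames; where |expected| marks a buffer with
// 'P', the buffer's preroll discard padding is also verified. The result is
// compared against |expected|.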
void CheckExpectedBuffers(DemuxerStream* stream,
const std::string& expected) {
std::vector<std::string> timestamps = base::SplitString(
expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::stringstream ss;
for (size_t i = 0; i < timestamps.size(); ++i) {
// Initialize status to kAborted since it's possible for Read() to return
// without calling StoreStatusAndBuffer() if it doesn't have any buffers
// left to return.
DemuxerStream::Status status = DemuxerStream::kAborted;
scoped_refptr<DecoderBuffer> buffer;
stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
base::Unretained(this), &status, &buffer));
base::MessageLoop::current()->RunUntilIdle();
if (status != DemuxerStream::kOk || buffer->end_of_stream())
break;
if (i > 0)
ss << " ";
ss << buffer->timestamp().InMilliseconds();
if (buffer->is_key_frame())
ss << "K";
// Handle preroll buffers.
if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
ss << "P";
}
}
EXPECT_EQ(expected, ss.str());
}
MOCK_METHOD1(Checkpoint, void(int id));
struct BufferTimestamps {
int video_time_ms;
int audio_time_ms;
};
static const int kSkip = -1;
// Test parsing a WebM file.
// |filename| - The name of the file in media/test/data to parse.
// |timestamps| - The expected timestamps on the parsed buffers.
// A timestamp of kSkip indicates that a Read() call for that stream
// shouldn't be made on that iteration of the loop. If both streams have
// kSkip then the loop terminates.
bool ParseWebMFile(const std::string& filename,
const BufferTimestamps* timestamps,
const base::TimeDelta& duration) {
return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
}
bool ParseWebMFile(const std::string& filename,
const BufferTimestamps* timestamps,
const base::TimeDelta& duration,
int stream_flags) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
return false;
// Read a WebM file into memory and send the data to the demuxer.
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
// Verify that the timestamps on the first few packets match what we
// expect.
for (size_t i = 0;
(timestamps[i].audio_time_ms != kSkip ||
timestamps[i].video_time_ms != kSkip);
i++) {
bool audio_read_done = false;
bool video_read_done = false;
if (timestamps[i].audio_time_ms != kSkip) {
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(
timestamps[i].audio_time_ms),
&audio_read_done));
EXPECT_TRUE(audio_read_done);
}
if (timestamps[i].video_time_ms != kSkip) {
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(
timestamps[i].video_time_ms),
&video_read_done));
EXPECT_TRUE(video_read_done);
}
}
return true;
}
MOCK_METHOD0(DemuxerOpened, void());
MOCK_METHOD2(OnEncryptedMediaInitData,
void(EmeInitDataType init_data_type,
const std::vector<uint8_t>& init_data));
MOCK_METHOD0(InitSegmentReceived, void(void));
void Seek(base::TimeDelta seek_time) {
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
message_loop_.RunUntilIdle();
}
void MarkEndOfStream(PipelineStatus status) {
demuxer_->MarkEndOfStream(status);
message_loop_.RunUntilIdle();
}
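// Sets the timestamp offset to apply to the next append for |id|. Returns
// false (without updating the offset) if the parser is still in the middle
// of a media segment.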
bool SetTimestampOffset(const std::string& id,
base::TimeDelta timestamp_offset) {
if (demuxer_->IsParsingMediaSegment(id))
return false;
timestamp_offset_map_[id] = timestamp_offset;
return true;
}
base::MessageLoop message_loop_;
MockDemuxerHost host_;
scoped_refptr<StrictMock<MockMediaLog>> media_log_;
scoped_ptr<ChunkDemuxer> demuxer_;
MediaSourceState::InitSegmentReceivedCB init_segment_received_cb_;
base::TimeDelta append_window_start_for_next_append_;
base::TimeDelta append_window_end_for_next_append_;
// Map of source id to timestamp offset to use for the next AppendData()
// operation for that source id.
std::map<std::string, base::TimeDelta> timestamp_offset_map_;
private:
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
};
TEST_F(ChunkDemuxerTest, Init) {
InSequence s;
// Test no streams, audio-only, video-only, and audio & video scenarios.
// Audio and video streams can be encrypted or not encrypted.
for (int i = 0; i < 16; i++) {
bool has_audio = (i & 0x1) != 0;
bool has_video = (i & 0x2) != 0;
bool is_audio_encrypted = (i & 0x4) != 0;
bool is_video_encrypted = (i & 0x8) != 0;
// Skip invalid combinations.
if ((!has_audio && is_audio_encrypted) ||
(!has_video && is_video_encrypted)) {
continue;
}
CreateNewDemuxer();
int stream_flags = 0;
if (has_audio)
stream_flags |= HAS_AUDIO;
if (has_video)
stream_flags |= HAS_VIDEO;
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
stream_flags, is_audio_encrypted, is_video_encrypted));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
ASSERT_TRUE(audio_stream);
const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
EXPECT_EQ(kCodecVorbis, config.codec());
EXPECT_EQ(32, config.bits_per_channel());
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
EXPECT_EQ(44100, config.samples_per_second());
EXPECT_GT(config.extra_data().size(), 0u);
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(audio_stream);
}
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
if (has_video) {
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(video_stream);
}
ShutdownDemuxer();
demuxer_.reset();
}
}
// TODO(acolwell): Fold this test into Init tests since the tests are
// almost identical.
TEST_F(ChunkDemuxerTest, InitText) {
// Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
// No encryption cases handled here.
bool has_video = true;
bool is_audio_encrypted = false;
bool is_video_encrypted = false;
for (int i = 0; i < 2; i++) {
bool has_audio = (i & 0x1) != 0;
CreateNewDemuxer();
DemuxerStream* text_stream = NULL;
TextTrackConfig text_config;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(DoAll(SaveArg<0>(&text_stream),
SaveArg<1>(&text_config)));
int stream_flags = HAS_TEXT;
if (has_audio)
stream_flags |= HAS_AUDIO;
if (has_video)
stream_flags |= HAS_VIDEO;
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
stream_flags, is_audio_encrypted, is_video_encrypted));
ASSERT_TRUE(text_stream);
EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
EXPECT_EQ(kTextSubtitles, text_config.kind());
EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
->supports_partial_append_window_trimming());
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
ASSERT_TRUE(audio_stream);
const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
EXPECT_EQ(kCodecVorbis, config.codec());
EXPECT_EQ(32, config.bits_per_channel());
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
EXPECT_EQ(44100, config.samples_per_second());
EXPECT_GT(config.extra_data().size(), 0u);
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(audio_stream);
}
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
if (has_video) {
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(video_stream);
}
ShutdownDemuxer();
demuxer_.reset();
}
}
TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
// Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
// segment in which the text track ID changes. Verify appended buffers before
// and after the second init segment map to the same underlying track buffers.
CreateNewDemuxer();
DemuxerStream* text_stream = NULL;
TextTrackConfig text_config;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(DoAll(SaveArg<0>(&text_stream),
SaveArg<1>(&text_config)));
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
ASSERT_TRUE(audio_stream);
ASSERT_TRUE(video_stream);
ASSERT_TRUE(text_stream);
AppendMuxedCluster(MuxedStreamInfo(kAudioTrackNum, "0K 23K", 23),
MuxedStreamInfo(kVideoTrackNum, "0K 30", 30),
MuxedStreamInfo(kTextTrackNum, "10K"));
CheckExpectedRanges("{ [0,46) }");
scoped_ptr<uint8_t[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
false, false,
&info_tracks, &info_tracks_size);
EXPECT_CALL(*this, InitSegmentReceived());
demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId],
init_segment_received_cb_);
AppendMuxedCluster(
MuxedStreamInfo(kAudioTrackNum, "46K 69K", 23),
MuxedStreamInfo(kVideoTrackNum, "60K",
WebMClusterParser::kDefaultVideoBufferDurationInMs),
MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
CheckExpectedRanges("{ [0,92) }");
CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
CheckExpectedBuffers(video_stream, "0K 30 60K");
CheckExpectedBuffers(text_stream, "10K 45K");
ShutdownDemuxer();
}
TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
// Tests that non-key-frames following an init segment are allowed
// and dropped, as expected if the initialization segment received
// algorithm correctly sets the "needs random access point" flag to true for
// all track buffers. Note that the first initialization segment alone is
// insufficient to fully test this, since the flag initializes to true.
CreateNewDemuxer();
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
ASSERT_TRUE(audio_stream && video_stream && text_stream);
AppendMuxedCluster(
MuxedStreamInfo(kAudioTrackNum, "23K",
WebMClusterParser::kDefaultAudioBufferDurationInMs),
MuxedStreamInfo(kVideoTrackNum, "0 30K", 30),
MuxedStreamInfo(kTextTrackNum, "25K 40K"));
CheckExpectedRanges("{ [23,46) }");
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
AppendMuxedCluster(MuxedStreamInfo(kAudioTrackNum, "46K 69K", 23),
MuxedStreamInfo(kVideoTrackNum, "60 90K", 30),
MuxedStreamInfo(kTextTrackNum, "80K 90K"));
CheckExpectedRanges("{ [23,92) }");
CheckExpectedBuffers(audio_stream, "23K 46K 69K");
CheckExpectedBuffers(video_stream, "30K 90K");
CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
}
// Make sure that the demuxer reports an error if Shutdown()
// is called before all the initialization segments are appended.
TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
ExpectInitMediaLogs(HAS_AUDIO);
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
ShutdownDemuxer();
}
TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(1));
ExpectInitMediaLogs(HAS_VIDEO);
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
ShutdownDemuxer();
}
// Verifies that all streams waiting for data receive an end of stream
// buffer when Shutdown() is called.
TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
bool audio_read_done = false;
bool video_read_done = false;
bool text_read_done = false;
audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
message_loop_.RunUntilIdle();
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
EXPECT_FALSE(text_read_done);
ShutdownDemuxer();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
EXPECT_TRUE(text_read_done);
}
// Test that Seek() completes successfully when the first cluster
// arrives.
TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
InSequence s;
EXPECT_CALL(*this, Checkpoint(1));
Seek(base::TimeDelta::FromMilliseconds(46));
EXPECT_CALL(*this, Checkpoint(2));
Checkpoint(1);
AppendCluster(kDefaultSecondCluster());
message_loop_.RunUntilIdle();
Checkpoint(2);
}
// Test that parsing errors are handled for clusters appended after init.
TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
EXPECT_MEDIA_LOG(StreamParsingFailed());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendGarbage();
}
// Test the case where a Seek() is requested while the parser
// is in the middle of a cluster. This verifies that the parser
// does not reset itself on a seek.
TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
InSequence s;
scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
// Split the cluster into two appends at an arbitrary point near the end.
int first_append_size = cluster_a->size() - 11;
int second_append_size = cluster_a->size() - first_append_size;
// Append the first part of the cluster.
AppendData(cluster_a->data(), first_append_size);
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectRead(DemuxerStream::VIDEO, 0);
ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
Seek(base::TimeDelta::FromSeconds(5));
// Append the rest of the cluster.
AppendData(cluster_a->data() + first_append_size, second_append_size);
// Append the new cluster and verify that only the blocks
// in the new cluster are returned.
AppendCluster(GenerateCluster(5000, 6));
GenerateExpectedReads(5000, 6);
}
// Test the case where AppendData() is called before Init().
TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
scoped_ptr<uint8_t[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId],
init_segment_received_cb_);
}
// Make sure Read() callbacks are dispatched with the proper data.
TEST_F(ChunkDemuxerTest, Read) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done));
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}
TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
EXPECT_MEDIA_LOG(GeneratedSplice(13000, 10000));
AppendCluster(GenerateCluster(10, 4));
// Make sure that AppendCluster() does not fail with a cluster that
// overlaps the previously appended cluster.
EXPECT_MEDIA_LOG(SkippingSpliceAlreadySpliced(0));
AppendCluster(GenerateCluster(5, 4));
// Verify that AppendData() can still accept more data.
scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
EXPECT_MEDIA_LOG(GeneratedSplice(6000, 45000));
demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId],
init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
// Test the case where block timecodes are not monotonically
// increasing but stay above the cluster timecode.
cb.SetClusterTimecode(5);
AddSimpleBlock(&cb, kAudioTrackNum, 5);
AddSimpleBlock(&cb, kVideoTrackNum, 10);
AddSimpleBlock(&cb, kAudioTrackNum, 7);
AddSimpleBlock(&cb, kVideoTrackNum, 15);
EXPECT_MEDIA_LOG(WebMOutOfOrderTimecode());
EXPECT_MEDIA_LOG(StreamParsingFailed());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendCluster(cb.Finish());
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId],
init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
// Test timecodes going backwards and including values less than the cluster
// timecode.
cb.SetClusterTimecode(5);
AddSimpleBlock(&cb, kAudioTrackNum, 5);
AddSimpleBlock(&cb, kVideoTrackNum, 5);
AddSimpleBlock(&cb, kAudioTrackNum, 3);
AddSimpleBlock(&cb, kVideoTrackNum, 3);
EXPECT_MEDIA_LOG(WebMNegativeTimecodeOffset("-2"));
EXPECT_MEDIA_LOG(StreamParsingFailed());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendCluster(cb.Finish());
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId],
init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
// Test monotonically increasing timestamps on a per-stream basis.
cb.SetClusterTimecode(4);
AddSimpleBlock(&cb, kAudioTrackNum, 5);
AddSimpleBlock(&cb, kVideoTrackNum, 5);
AddSimpleBlock(&cb, kAudioTrackNum, 4);
AddSimpleBlock(&cb, kVideoTrackNum, 7);
EXPECT_MEDIA_LOG(WebMOutOfOrderTimecode());
EXPECT_MEDIA_LOG(StreamParsingFailed());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendCluster(cb.Finish());
}
// Test the case where a cluster is passed to AppendCluster() before
// INFO & TRACKS data.
TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(PIPELINE_ERROR_DECODE), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
EXPECT_MEDIA_LOG(WebMClusterBeforeFirstInfo());
EXPECT_MEDIA_LOG(StreamParsingFailed());
AppendCluster(GenerateCluster(0, 1));
}
// Test the case where we get a MarkEndOfStream() call during initialization.
TEST_F(ChunkDemuxerTest, EOSDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
MarkEndOfStream(PIPELINE_OK);
}
TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
CheckExpectedRanges("{ }");
MarkEndOfStream(PIPELINE_OK);
ShutdownDemuxer();
CheckExpectedRanges("{ }");
demuxer_->RemoveId(kSourceId);
demuxer_.reset();
}
TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
CheckExpectedRanges("{ }");
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges("{ }");
}
TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
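// Marking end of stream with a decode error should report the error via
// OnDemuxerError() while leaving the buffered range untouched.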
CheckExpectedRanges(kDefaultFirstClusterRange);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
MarkEndOfStream(PIPELINE_ERROR_DECODE);
CheckExpectedRanges(kDefaultFirstClusterRange);
}
TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
MarkEndOfStream(PIPELINE_ERROR_NETWORK);
}
// Helper class to reduce duplicate code when testing end of stream
// Read() behavior.
class EndOfStreamHelper {
public:
explicit EndOfStreamHelper(Demuxer* demuxer)
: demuxer_(demuxer),
audio_read_done_(false),
video_read_done_(false) {
}
// Request a read on the audio and video streams.
void RequestReads() {
EXPECT_FALSE(audio_read_done_);
EXPECT_FALSE(video_read_done_);
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
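// Reads complete asynchronously, so pump the message loop to let any
// already-satisfiable callbacks run before the caller checks state.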
base::MessageLoop::current()->RunUntilIdle();
}
// Check whether the |audio_read_done_| and |video_read_done_| flags match
// |expected|.
void CheckIfReadDonesWereCalled(bool expected) {
base::MessageLoop::current()->RunUntilIdle();
EXPECT_EQ(expected, audio_read_done_);
EXPECT_EQ(expected, video_read_done_);
}
private:
static void OnEndOfStreamReadDone(
bool* called,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kOk);
EXPECT_TRUE(buffer->end_of_stream());
*called = true;
}
Demuxer* demuxer_;
bool audio_read_done_;
bool video_read_done_;
DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
};
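// Each EndOfStreamHelper instance tracks a single round of audio and video
// reads; the tests below use several instances to issue reads both before
// and after MarkEndOfStream() is called.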
// Make sure that any pending reads for which we have no media data receive an
// "end of stream" buffer when MarkEndOfStream() is called.
TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 2));
bool audio_read_done_1 = false;
bool video_read_done_1 = false;
EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done_1));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done_1));
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done_1);
EXPECT_TRUE(video_read_done_1);
end_of_stream_helper_1.RequestReads();
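// Marking end of stream should clamp the duration to the end of the appended
// media (kVideoBlockDuration ms, per the expectation below) and fulfill the
// pending end-of-stream reads.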
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
MarkEndOfStream(PIPELINE_OK);
end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
end_of_stream_helper_2.RequestReads();
end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
}
// Make sure that all Read() calls after we get a MarkEndOfStream() call
// return an "end of stream" buffer.
TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 2));
bool audio_read_done_1 = false;
bool video_read_done_1 = false;
EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done_1));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done_1));
end_of_stream_helper_1.RequestReads();
EXPECT_TRUE(audio_read_done_1);
EXPECT_TRUE(video_read_done_1);
end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
MarkEndOfStream(PIPELINE_OK);
end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
// Request a few more reads and make sure we immediately get
// end of stream buffers.
end_of_stream_helper_2.RequestReads();
end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
end_of_stream_helper_3.RequestReads();
end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
}
TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(0, 10);
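// Per the expectation below, marking end of stream adjusts the duration to
// the end of the appended cluster (138 ms).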
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
MarkEndOfStream(PIPELINE_OK);
// Start the first seek.
Seek(base::TimeDelta::FromMilliseconds(20));
// Simulate another seek being requested before the first
// seek has finished prerolling.
base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
demuxer_->CancelPendingSeek(seek_time2);
// Finish second seek.
Seek(seek_time2);
DemuxerStream::Status status;
base::TimeDelta last_timestamp;
// Make sure audio can reach end of stream.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
// Make sure video can reach end of stream.
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
}
// Verify buffered range change behavior for audio/video/text tracks.
TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
AppendMuxedCluster(MuxedStreamInfo(kVideoTrackNum, "0K 33", 33),
MuxedStreamInfo(kAudioTrackNum, "0K 23K", 23));
// Check expected ranges and verify that an empty text track does not
// affect the expected ranges.
CheckExpectedRanges("{ [0,46) }");
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
MarkEndOfStream(PIPELINE_OK);
// After marking end of stream, the range should extend to the end of the
// video block; the empty text track still has no effect on the ranges.
CheckExpectedRanges("{ [0,66) }");
// Unmark end of stream state and verify that the ranges return to
// their pre-"end of stream" values.
demuxer_->UnmarkEndOfStream();
CheckExpectedRanges("{ [0,46) }");
// Add text track data and verify that the buffered ranges don't change
// since the intersection of all the tracks doesn't change.
EXPECT_MEDIA_LOG(SkippingSpliceAtOrBefore(0, 0));
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
AppendMuxedCluster(MuxedStreamInfo(kVideoTrackNum, "0K 33", 33),
MuxedStreamInfo(kAudioTrackNum, "0K 23K", 23),
MuxedStreamInfo(kTextTrackNum, "0K 100K"));
CheckExpectedRanges("{ [0,46) }");
// Mark end of stream and verify that text track data is reflected in
// the new range.
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges("{ [0,200) }");
}
// Make sure AppendData() will accept elements that span multiple calls.
TEST_F(ChunkDemuxerTest, AppendingInPieces) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
scoped_ptr<uint8_t[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
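// Stitch the init segment and both clusters into one contiguous buffer so
// AppendDataInPieces() can feed it to the parser in small chunks, verifying
// that elements spanning append boundaries are handled.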
size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
scoped_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
uint8_t* dst = buffer.get();
memcpy(dst, info_tracks.get(), info_tracks_size);
dst += info_tracks_size;
memcpy(dst, cluster_a->data(), cluster_a->size());
dst += cluster_a->size();
memcpy(dst, cluster_b->data(), cluster_b->size());
dst += cluster_b->size();
ExpectInitMediaLogs(HAS_AUDIO | HAS_VIDEO);
EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer.get(), buffer_size);
GenerateExpectedReads(0, 9);
}
TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
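// Each entry below is presumably {video_time_ms, audio_time_ms}; kSkip marks
// a stream whose timestamp is not checked (compare the audio-only and
// video-only variants of this test).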
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
{67, 6},
{100, 9},
{133, 12},
{kSkip, kSkip},
};
ExpectInitMediaLogs(HAS_AUDIO | HAS_VIDEO);
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(2)).Times(7);
// Expect duration adjustment since actual duration differs slightly from
// duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744)));
EXPECT_EQ(212949, demuxer_->GetMemoryUsage());
}
TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
{67, 6},
{100, 9},
{133, 12},
{kSkip, kSkip},
};
ExpectInitMediaLogs(HAS_AUDIO | HAS_VIDEO);
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(2)).Times(7);
ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
kInfiniteDuration()));
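// A live recording presumably carries no duration in its header (hence the
// kInfiniteDuration() expectation above), so both streams should report
// LIVENESS_LIVE.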
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, audio->liveness());
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, video->liveness());
EXPECT_EQ(212949, demuxer_->GetMemoryUsage());
}
TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{kSkip, 0},
{kSkip, 3},
{kSkip, 6},
{kSkip, 9},
{kSkip, 12},
{kSkip, kSkip},
};
ExpectInitMediaLogs(HAS_AUDIO);
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(2));
// Expect duration adjustment since actual duration differs slightly from
// duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744),
HAS_AUDIO));
EXPECT_EQ(18624, demuxer_->GetMemoryUsage());
}
TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{0, kSkip},
{33, kSkip},
{67, kSkip},
{100, kSkip},
{133, kSkip},
{kSkip, kSkip},
};
ExpectInitMediaLogs(HAS_VIDEO);
// Expect duration adjustment since actual duration differs slightly from
// duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2703),
HAS_VIDEO));
EXPECT_EQ(194325, demuxer_->GetMemoryUsage());
}
TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
{33, 6},
{67, 9},
{100, 12},
{kSkip, kSkip},
};
ExpectInitMediaLogs(HAS_AUDIO | HAS_VIDEO);
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(2));
ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2767)));
}
// Verify that we output buffers before the entire cluster has been parsed.
TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done));
// Make sure the reads haven't completed yet.
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
// Append data one byte at a time until one or both reads complete.
int i = 0;
for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
AppendData(cluster->data() + i, 1);
message_loop_.RunUntilIdle();
}
EXPECT_TRUE(audio_read_done || video_read_done);
EXPECT_GT(i, 0);
EXPECT_LT(i, cluster->size());
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(23),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(33),
&video_read_done));
// Make sure the reads haven't completed yet.
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
// Append the remaining data.
ASSERT_LT(i, cluster->size());
AppendData(cluster->data() + i, cluster->size() - i);
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}
TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
EXPECT_MEDIA_LOG(StreamParsingFailed());
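// A single 0x00 byte is not a valid EBML element ID, so this append should
// trigger the parse failure expected above.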
uint8_t tmp = 0;
demuxer_->AppendData(kSourceId, &tmp, 1,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId],
init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
PIPELINE_ERROR_DECODE), true);
std::vector<std::string> codecs(1);
codecs[0] = "vorbis";
ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
ChunkDemuxer::kOk);
// Video track is unexpected per mimetype.
EXPECT_MEDIA_LOG(InitSegmentMismatchesMimeType("a video", true));
EXPECT_MEDIA_LOG(StreamParsingFailed());
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
}
TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
PIPELINE_ERROR_DECODE), true);
std::vector<std::string> codecs(1);
codecs[0] = "vp8";
ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
ChunkDemuxer::kOk);
// Audio track is unexpected per mimetype.
EXPECT_MEDIA_LOG(InitSegmentMismatchesMimeType("an audio", true));
EXPECT_MEDIA_LOG(StreamParsingFailed());
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
}
TEST_F(ChunkDemuxerTest, AudioOnlyHeaderWithAVType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
std::vector<std::string> codecs(2);
codecs[0] = "vorbis";
codecs[1] = "vp8";
ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
ChunkDemuxer::kOk);
// Video track is also expected per mimetype.
EXPECT_MEDIA_LOG(InitSegmentMismatchesMimeType("a video", false));
EXPECT_MEDIA_LOG(StreamParsingFailed());
AppendInitSegment(HAS_AUDIO);
}
TEST_F(ChunkDemuxerTest, VideoOnlyHeaderWithAVType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
std::vector<std::string> codecs(2);
codecs[0] = "vorbis";
codecs[1] = "vp8";
ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
ChunkDemuxer::kOk);
// Audio track is also expected per mimetype.
EXPECT_MEDIA_LOG(InitSegmentMismatchesMimeType("an audio", false));
EXPECT_MEDIA_LOG(StreamParsingFailed());
AppendInitSegment(HAS_VIDEO);
}
TEST_F(ChunkDemuxerTest, MultipleHeaders) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
// Append another identical initialization segment.
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
AppendCluster(kDefaultSecondCluster());
GenerateExpectedReads(0, 9);
}
TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
// Append audio and video data into separate source ids.
AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(0, 4);
}
TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
// TODO(matthewjheaney): Here and elsewhere, we need more tests
// for inband text tracks (http://crbug.com/321455).
std::string audio_id = "audio1";
std::string video_id = "video1";
EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(2));
ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
// Append audio and video data into separate source ids.
AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(0, 4);
}
TEST_F(ChunkDemuxerTest, AddIdFailures) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
// Adding an id with audio/video should fail because we already added audio.
ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
ExpectInitMediaLogs(HAS_AUDIO);
EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
// Adding an id after append should fail.
ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
}
// Test that Read() calls after a RemoveId() return "end of stream" buffers.
TEST_F(ChunkDemuxerTest, RemoveId) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
// Append audio and video data into separate source ids.
AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
// Read() from audio should return normal buffers.
GenerateAudioStreamExpectedReads(0, 4);
// Remove the audio id.
demuxer_->RemoveId(audio_id);
// Read() from audio should return "end of stream" buffers.
bool audio_read_done = false;
ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
// Read() from video should still return normal buffers.
GenerateVideoStreamExpectedReads(0, 4);
}
// Test that removing an ID immediately after adding it does not interfere with
// quota for new IDs in the future.
TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
std::string audio_id_1 = "audio1";
ASSERT_EQ(AddId(audio_id_1, HAS_AUDIO), ChunkDemuxer::kOk);
demuxer_->RemoveId(audio_id_1);
std::string audio_id_2 = "audio2";
ASSERT_EQ(AddId(audio_id_2, HAS_AUDIO), ChunkDemuxer::kOk);
}
TEST_F(ChunkDemuxerTest, SeekCanceled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Append cluster at the beginning of the stream.
AppendCluster(GenerateCluster(0, 4));
// Seek to an unbuffered region.
Seek(base::TimeDelta::FromSeconds(50));
// Attempt to read in unbuffered area; should not fulfill the read.
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
// Now cancel the pending seek, which should flush the reads with empty
// buffers.
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
demuxer_->CancelPendingSeek(seek_time);
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
// A seek back to the buffered region should succeed.
Seek(seek_time);
GenerateExpectedReads(0, 4);
}
TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Append cluster at the beginning of the stream.
AppendCluster(GenerateCluster(0, 4));
// Start waiting for a seek.
base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
demuxer_->StartWaitingForSeek(seek_time1);
// Now cancel the upcoming seek to an unbuffered region.
demuxer_->CancelPendingSeek(seek_time2);
demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
// Read requests should be fulfilled with empty buffers.
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
// A seek back to the buffered region should succeed.
Seek(seek_time2);
GenerateExpectedReads(0, 4);
}
// Test that Seek() successfully seeks to all source IDs.
TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
AppendCluster(
audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(
video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
// Read() should return buffers at 0.
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done));
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
// Seek to 3 (an unbuffered region).
Seek(base::TimeDelta::FromSeconds(3));
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromSeconds(3),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromSeconds(3),
&video_read_done));
// Read()s should not return until after data is appended at the Seek point.
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
AppendCluster(audio_id,
GenerateSingleStreamCluster(
3000, 3092, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id,
GenerateSingleStreamCluster(
3000, 3132, kVideoTrackNum, kVideoBlockDuration));
message_loop_.RunUntilIdle();
// Read() should return buffers at 3.
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}