Update stable to r5019.
git-svn-id: http://webrtc.googlecode.com/svn/stable/webrtc@5022 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/build/common.gypi b/build/common.gypi
index 73dda8d..1207cc1 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -311,12 +311,6 @@
}],
],
}],
- ['clang==1', {
- 'cflags!': [
- # TODO(kjellander): Remove when Chromium's common.gypi enables it.
- '-Wno-unused-const-variable',
- ],
- }],
], # conditions
'direct_dependent_settings': {
'include_dirs': [
diff --git a/common_audio/resampler/push_resampler.cc b/common_audio/resampler/push_resampler.cc
index 92775af..2994418 100644
--- a/common_audio/resampler/push_resampler.cc
+++ b/common_audio/resampler/push_resampler.cc
@@ -19,9 +19,7 @@
namespace webrtc {
PushResampler::PushResampler()
- : sinc_resampler_(NULL),
- sinc_resampler_right_(NULL),
- src_sample_rate_hz_(0),
+ : src_sample_rate_hz_(0),
dst_sample_rate_hz_(0),
num_channels_(0),
src_left_(NULL),
diff --git a/common_audio/resampler/push_sinc_resampler.cc b/common_audio/resampler/push_sinc_resampler.cc
index 886d763..1fb72dc 100644
--- a/common_audio/resampler/push_sinc_resampler.cc
+++ b/common_audio/resampler/push_sinc_resampler.cc
@@ -17,14 +17,13 @@
PushSincResampler::PushSincResampler(int source_frames,
int destination_frames)
- : resampler_(NULL),
+ : resampler_(new SincResampler(source_frames * 1.0 / destination_frames,
+ source_frames, this)),
float_buffer_(new float[destination_frames]),
source_ptr_(NULL),
destination_frames_(destination_frames),
first_pass_(true),
source_available_(0) {
- resampler_.reset(new SincResampler(source_frames * 1.0 / destination_frames,
- source_frames, this));
}
PushSincResampler::~PushSincResampler() {
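
The hunk above replaces a two-step setup (NULL-initialize the scoped_ptr, then reset() in the constructor body) with direct construction in the member initializer list. A minimal standalone sketch of the same pattern, using hypothetical class names and std::unique_ptr rather than WebRTC's scoped_ptr:

    #include <memory>

    class SincResamplerLike {
     public:
      SincResamplerLike(double io_ratio, int request_frames) {}
    };

    class PushResamplerLike {
     public:
      PushResamplerLike(int source_frames, int destination_frames)
          // Build the owned object directly in the initializer list instead of
          // default-constructing the smart pointer and calling reset() later.
          : resampler_(new SincResamplerLike(
                source_frames * 1.0 / destination_frames, source_frames)),
            destination_frames_(destination_frames) {}

     private:
      std::unique_ptr<SincResamplerLike> resampler_;
      int destination_frames_;
    };
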
diff --git a/modules/audio_coding/codecs/g711/g711_interface.c b/modules/audio_coding/codecs/g711/g711_interface.c
index 9ef7884..087e3e1 100644
--- a/modules/audio_coding/codecs/g711/g711_interface.c
+++ b/modules/audio_coding/codecs/g711/g711_interface.c
@@ -31,7 +31,7 @@
for (n = 0; n < len; n++) {
tempVal = (uint16_t) linear_to_alaw(speechIn[n]);
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
if ((n & 0x1) == 1) {
encoded[n >> 1] |= ((uint16_t) tempVal);
} else {
@@ -69,7 +69,7 @@
for (n = 0; n < len; n++) {
tempVal = (uint16_t) linear_to_ulaw(speechIn[n]);
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
if ((n & 0x1) == 1) {
encoded[n >> 1] |= ((uint16_t) tempVal);
} else {
@@ -103,7 +103,7 @@
}
for (n = 0; n < len; n++) {
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
if ((n & 0x1) == 1) {
tempVal = ((uint16_t) encoded[n >> 1] & 0xFF);
} else {
@@ -140,7 +140,7 @@
}
for (n = 0; n < len; n++) {
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
if ((n & 0x1) == 1) {
tempVal = ((uint16_t) encoded[n >> 1] & 0xFF);
} else {
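
The only change in this file is the rename of the endianness guard from WEBRTC_BIG_ENDIAN to WEBRTC_ARCH_BIG_ENDIAN; the packing logic itself is untouched. For reference, a standalone sketch of what the guarded code does, with the even-index branch inferred by symmetry (illustrative only; the macro is assumed to be provided by webrtc/typedefs.h):

    #include <stdint.h>

    // Two 8-bit G.711 codewords are packed per 16-bit word, with the byte
    // order chosen to match the host.
    static void PackCodewords(const uint8_t* codes, int len, uint16_t* packed) {
      int n;
      for (n = 0; n < len; n++) {
        uint16_t val = codes[n];
    #ifdef WEBRTC_ARCH_BIG_ENDIAN
        if ((n & 0x1) == 1)
          packed[n >> 1] |= val;                    // Odd index: low byte.
        else
          packed[n >> 1] = (uint16_t)(val << 8);    // Even index: high byte.
    #else
        if ((n & 0x1) == 1)
          packed[n >> 1] |= (uint16_t)(val << 8);   // Odd index: high byte.
        else
          packed[n >> 1] = val;                     // Even index: low byte.
    #endif
      }
    }
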
diff --git a/modules/audio_coding/codecs/ilbc/decode.c b/modules/audio_coding/codecs/ilbc/decode.c
index 5da9685..febd4ce 100644
--- a/modules/audio_coding/codecs/ilbc/decode.c
+++ b/modules/audio_coding/codecs/ilbc/decode.c
@@ -28,7 +28,7 @@
#include "decode_residual.h"
#include "unpack_bits.h"
#include "hp_output.h"
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
#include "swap_bytes.h"
#endif
@@ -54,7 +54,7 @@
int16_t PLCresidual[BLOCKL_MAX + LPC_FILTERORDER];
int16_t syntdenum[NSUB_MAX*(LPC_FILTERORDER+1)];
int16_t PLClpc[LPC_FILTERORDER + 1];
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
uint16_t swapped[NO_OF_WORDS_30MS];
#endif
iLBC_bits *iLBCbits_inst = (iLBC_bits*)PLCresidual;
@@ -68,7 +68,7 @@
/* Unpacketize bits into parameters */
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
WebRtcIlbcfix_SwapBytes(bytes, iLBCdec_inst->no_of_words, swapped);
last_bit = WebRtcIlbcfix_UnpackBits(swapped, iLBCbits_inst, iLBCdec_inst->mode);
#else
diff --git a/modules/audio_coding/codecs/ilbc/encode.c b/modules/audio_coding/codecs/ilbc/encode.c
index 75d1672..2f899a5 100644
--- a/modules/audio_coding/codecs/ilbc/encode.c
+++ b/modules/audio_coding/codecs/ilbc/encode.c
@@ -32,7 +32,7 @@
#include "unpack_bits.h"
#include "index_conv_dec.h"
#endif
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
#include "swap_bytes.h"
#endif
@@ -489,7 +489,7 @@
WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
#endif
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
/* Swap bytes for LITTLE ENDIAN since the packbits()
function assumes BIG_ENDIAN machine */
#ifdef SPLIT_10MS
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 945475f..8baa307 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -327,7 +327,7 @@
{
ISACFIX_SubStruct *ISAC_inst;
int16_t stream_len;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
@@ -352,7 +352,7 @@
/* convert from bytes to int16_t */
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0;k<(stream_len+1)>>1;k++) {
encoded[k] = (int16_t)( ( (uint16_t)(ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] >> 8 )
| (((ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] & 0x00FF) << 8));
@@ -442,7 +442,7 @@
/* convert from bytes to int16_t */
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0;k<(stream_len+1)>>1;k++) {
encoded[k] = (int16_t)(((uint16_t)(ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] >> 8)
| (((ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] & 0x00FF) << 8));
@@ -485,7 +485,7 @@
{
ISACFIX_SubStruct *ISAC_inst;
int16_t stream_len;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
@@ -507,7 +507,7 @@
return -1;
}
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0;k<(stream_len+1)>>1;k++) {
encoded[k] = (int16_t)( ( (uint16_t)(ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] >> 8 )
| (((ISAC_inst->ISACenc_obj.bitstr_obj).stream[k] & 0x00FF) << 8));
@@ -588,7 +588,7 @@
ISACFIX_SubStruct *ISAC_inst;
Bitstr_dec streamdata;
uint16_t partOfStream[5];
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
@@ -621,7 +621,7 @@
streamdata.stream_index = 0;
streamdata.full = 1;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0; k<5; k++) {
streamdata.stream[k] = (uint16_t) (((uint16_t)encoded[k] >> 8)|((encoded[k] & 0xFF)<<8));
}
@@ -676,7 +676,7 @@
ISACFIX_SubStruct *ISAC_inst;
Bitstr_dec streamdata;
uint16_t partOfStream[5];
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
@@ -709,7 +709,7 @@
streamdata.stream_index = 0;
streamdata.full = 1;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0; k<5; k++) {
streamdata.stream[k] = (uint16_t) ((encoded[k] >> 8)|((encoded[k] & 0xFF)<<8));
}
@@ -765,7 +765,7 @@
/* number of samples (480 or 960), output from decoder */
/* that were actually used in the encoder/decoder (determined on the fly) */
int16_t number_of_samples;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t declen = 0;
@@ -793,7 +793,7 @@
(ISAC_inst->ISACdec_obj.bitstr_obj).stream = (uint16_t *)encoded;
/* convert bitstream from int16_t to bytes */
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0; k<(len>>1); k++) {
(ISAC_inst->ISACdec_obj.bitstr_obj).stream[k] = (uint16_t) ((encoded[k] >> 8)|((encoded[k] & 0xFF)<<8));
}
@@ -868,7 +868,7 @@
/* twice the number of samples (480 or 960), output from decoder */
/* that were actually used in the encoder/decoder (determined on the fly) */
int16_t number_of_samples;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t declen = 0;
@@ -894,7 +894,7 @@
(ISAC_inst->ISACdec_obj.bitstr_obj).stream = (uint16_t *)encoded;
/* convert bitstream from int16_t to bytes */
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0; k<(len>>1); k++) {
(ISAC_inst->ISACdec_obj.bitstr_obj).stream[k] = (uint16_t) ((encoded[k] >> 8)|((encoded[k] & 0xFF)<<8));
}
@@ -1267,7 +1267,7 @@
{
Bitstr_dec streamdata;
uint16_t partOfStream[5];
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
@@ -1280,7 +1280,7 @@
streamdata.stream_index = 0;
streamdata.full = 1;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0; k<5; k++) {
streamdata.stream[k] = (uint16_t) (((uint16_t)encoded[k] >> 8)|((encoded[k] & 0xFF)<<8));
}
@@ -1316,7 +1316,7 @@
{
Bitstr_dec streamdata;
uint16_t partOfStream[5];
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
@@ -1329,7 +1329,7 @@
streamdata.stream_index = 0;
streamdata.full = 1;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k=0; k<5; k++) {
streamdata.stream[k] = (uint16_t) (((uint16_t)encoded[k] >> 8)|((encoded[k] & 0xFF)<<8));
}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
index a1dced9..9c4e587 100644
--- a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
@@ -8,20 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * pitch_estimator.c
- *
- * Pitch filter functions
- *
- */
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
#ifdef WEBRTC_ARCH_ARM_NEON
#include <arm_neon.h>
#endif
-#include "pitch_estimator.h"
-#include "signal_processing_library.h"
-#include "system_wrappers/interface/compile_assert.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
/* log2[0.2, 0.5, 0.98] in Q8 */
static const int16_t kLogLagWinQ8[3] = {
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c b/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
index df961a7..c3db01c 100644
--- a/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
@@ -8,18 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * pitch_filter.c
- *
- * Pitch filter functions
- *
- */
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
-#include "common_audio/signal_processing/include/signal_processing_library.h"
-#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
-#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
-#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
-#include "system_wrappers/interface/compile_assert.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
// Number of segments in a pitch subframe.
static const int kSegments = 5;
diff --git a/modules/audio_coding/codecs/isac/main/source/isac.c b/modules/audio_coding/codecs/isac/main/source/isac.c
index 1e90272..f3f1650 100644
--- a/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -742,7 +742,7 @@
WebRtcIsac_GetCrc((int16_t*)(&(ptrEncodedUW8[streamLenLB + 1])),
streamLenUB + garbageLen, &crc);
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
ptrEncodedUW8[streamLen - LEN_CHECK_SUM_WORD8 + k] =
(uint8_t)((crc >> (24 - k * 8)) & 0xFF);
@@ -805,7 +805,7 @@
int32_t currentBN;
uint8_t* encodedPtrUW8 = (uint8_t*)encoded;
uint32_t crc;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int16_t k;
#endif
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
@@ -896,7 +896,7 @@
WebRtcIsac_GetCrc((int16_t*)(&(encodedPtrUW8[streamLenLB + 1])),
streamLenUB, &crc);
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
encodedPtrUW8[totalStreamLen - LEN_CHECK_SUM_WORD8 + k] =
(uint8_t)((crc >> (24 - k * 8)) & 0xFF);
@@ -1008,7 +1008,7 @@
uint32_t arr_ts) {
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
Bitstr streamdata;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
@@ -1029,7 +1029,7 @@
WebRtcIsac_ResetBitstream(&(streamdata));
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < 10; k++) {
streamdata.stream[k] = (uint8_t)((encoded[k >> 1] >>
((k & 1) << 3)) & 0xFF);
@@ -1741,14 +1741,14 @@
int16_t WebRtcIsac_ReadBwIndex(const int16_t* encoded,
int16_t* bweIndex) {
Bitstr streamdata;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
WebRtcIsac_ResetBitstream(&(streamdata));
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < 10; k++) {
streamdata.stream[k] = (uint8_t)((encoded[k >> 1] >>
((k & 1) << 3)) & 0xFF);
@@ -1790,7 +1790,7 @@
const int16_t* encoded,
int16_t* frameLength) {
Bitstr streamdata;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
int16_t err;
@@ -1798,7 +1798,7 @@
WebRtcIsac_ResetBitstream(&(streamdata));
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < 10; k++) {
streamdata.stream[k] = (uint8_t)((encoded[k >> 1] >>
((k & 1) << 3)) & 0xFF);
@@ -2108,7 +2108,7 @@
int16_t totalLenUB;
uint8_t* ptrEncodedUW8 = (uint8_t*)encoded;
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
int k;
#endif
@@ -2164,7 +2164,7 @@
WebRtcIsac_GetCrc((int16_t*)(&(ptrEncodedUW8[streamLenLB + 1])),
streamLenUB, &crc);
-#ifndef WEBRTC_BIG_ENDIAN
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
ptrEncodedUW8[streamLen - LEN_CHECK_SUM_WORD8 + k] =
(uint8_t)((crc >> (24 - k * 8)) & 0xFF);
diff --git a/modules/audio_coding/codecs/pcm16b/pcm16b.c b/modules/audio_coding/codecs/pcm16b/pcm16b.c
index 04814b7..34aadc3 100644
--- a/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -15,7 +15,7 @@
#include "typedefs.h"
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
#include "signal_processing_library.h"
#endif
@@ -29,7 +29,7 @@
int16_t len,
int16_t *speechOut16b)
{
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_SPL_MEMCPY_W16(speechOut16b, speechIn16b, len);
#else
int i;
@@ -68,7 +68,7 @@
int16_t *speechOut16b,
int16_t* speechType)
{
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_SPL_MEMCPY_W8(speechOut16b, speechIn16b, ((len*sizeof(int16_t)+1)>>1));
#else
int i;
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 911605e..59d5727 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -140,7 +140,6 @@
receiver_initialized_(false),
callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
secondary_send_codec_inst_(),
- secondary_encoder_(NULL),
codec_timestamp_(expected_codec_ts_),
first_10ms_data_(false) {
diff --git a/modules/audio_coding/main/source/audio_coding_module_impl.cc b/modules/audio_coding/main/source/audio_coding_module_impl.cc
index 62a13c5..3802733 100644
--- a/modules/audio_coding/main/source/audio_coding_module_impl.cc
+++ b/modules/audio_coding/main/source/audio_coding_module_impl.cc
@@ -154,7 +154,6 @@
last_detected_tone_(kACMToneEnd),
callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
secondary_send_codec_inst_(),
- secondary_encoder_(NULL),
initial_delay_ms_(0),
num_packets_accumulated_(0),
num_bytes_accumulated_(0),
diff --git a/modules/audio_coding/neteq/dtmf_buffer.c b/modules/audio_coding/neteq/dtmf_buffer.c
index 9e32126..1788635 100644
--- a/modules/audio_coding/neteq/dtmf_buffer.c
+++ b/modules/audio_coding/neteq/dtmf_buffer.c
@@ -93,7 +93,7 @@
if (len == 4)
{
EventStart = encoded;
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
value=((*EventStart)>>8);
endEvent=((*EventStart)&0x80)>>7;
Volume=((*EventStart)&0x3F);
diff --git a/modules/audio_coding/neteq/rtp.c b/modules/audio_coding/neteq/rtp.c
index f23f351..6ab5944 100644
--- a/modules/audio_coding/neteq/rtp.c
+++ b/modules/audio_coding/neteq/rtp.c
@@ -31,7 +31,7 @@
return RTP_TOO_SHORT_PACKET;
}
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
i_IPver = (((uint16_t) (pw16_Datagram[0] & 0xC000)) >> 14); /* Extract the version */
i_P = (((uint16_t) (pw16_Datagram[0] & 0x2000)) >> 13); /* Extract the P bit */
i_X = (((uint16_t) (pw16_Datagram[0] & 0x1000)) >> 12); /* Extract the X bit */
@@ -62,7 +62,7 @@
i_padlength = ((pw16_Datagram[(i_DatagramLen >> 1) - 1]) & 0xFF);
}
}
-#else /* WEBRTC_LITTLE_ENDIAN */
+#else /* WEBRTC_ARCH_LITTLE_ENDIAN */
i_IPver = (((uint16_t) (pw16_Datagram[0] & 0xC0)) >> 6); /* Extract the IP version */
i_P = (((uint16_t) (pw16_Datagram[0] & 0x20)) >> 5); /* Extract the P bit */
i_X = (((uint16_t) (pw16_Datagram[0] & 0x10)) >> 4); /* Extract the X bit */
@@ -126,7 +126,7 @@
int i_discardedBlockLength = 0;
int singlePayload = 0;
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
if ((pw16_data[0] & 0x8000) == 0)
{
/* Only one payload in this packet*/
@@ -155,7 +155,7 @@
((((uint16_t)pw16_data[1]) & 0xFC00) >> 10);
i_blockLength = (((uint16_t)pw16_data[1]) & 0x3FF);
}
-#else /* WEBRTC_LITTLE_ENDIAN */
+#else /* WEBRTC_ARCH_LITTLE_ENDIAN */
if ((pw16_data[0] & 0x80) == 0)
{
/* Only one payload in this packet */
diff --git a/modules/audio_coding/neteq4/neteq_impl.cc b/modules/audio_coding/neteq4/neteq_impl.cc
index 4ed3976..a5c45ff 100644
--- a/modules/audio_coding/neteq4/neteq_impl.cc
+++ b/modules/audio_coding/neteq4/neteq_impl.cc
@@ -59,8 +59,7 @@
PacketBuffer* packet_buffer,
PayloadSplitter* payload_splitter,
TimestampScaler* timestamp_scaler)
- : background_noise_(NULL),
- buffer_level_filter_(buffer_level_filter),
+ : buffer_level_filter_(buffer_level_filter),
decoder_database_(decoder_database),
delay_manager_(delay_manager),
delay_peak_detector_(delay_peak_detector),
@@ -70,14 +69,6 @@
payload_splitter_(payload_splitter),
timestamp_scaler_(timestamp_scaler),
vad_(new PostDecodeVad()),
- algorithm_buffer_(NULL),
- sync_buffer_(NULL),
- expand_(NULL),
- normal_(NULL),
- merge_(NULL),
- accelerate_(NULL),
- preemptive_expand_(NULL),
- comfort_noise_(NULL),
last_mode_(kModeNormal),
mute_factor_array_(NULL),
decoded_buffer_length_(kMaxFrameSize),
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index da16814..c5cf137 100644
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -124,8 +124,6 @@
_scratchMixedParticipants(),
_scratchVadPositiveParticipantsAmount(0),
_scratchVadPositiveParticipants(),
- _crit(NULL),
- _cbCrit(NULL),
_id(id),
_minimumMixingFreq(kLowestPossible),
_mixReceiver(NULL),
@@ -142,8 +140,7 @@
_timeStamp(0),
_timeScheduler(kProcessPeriodicityInMs),
_mixedAudioLevel(),
- _processCalls(0),
- _limiter(NULL)
+ _processCalls(0)
{}
bool AudioConferenceMixerImpl::Init()
diff --git a/modules/audio_device/linux/audio_device_alsa_linux.cc b/modules/audio_device/linux/audio_device_alsa_linux.cc
index b3d87c6..caa1efe 100644
--- a/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -1177,7 +1177,7 @@
_playoutFramesIn10MS = _playoutFreq/100;
if ((errVal = LATE(snd_pcm_set_params)( _handlePlayout,
-#if defined(WEBRTC_BIG_ENDIAN)
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
SND_PCM_FORMAT_S16_BE,
#else
SND_PCM_FORMAT_S16_LE, //format
@@ -1333,7 +1333,7 @@
_recordingFramesIn10MS = _recordingFreq/100;
if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
-#if defined(WEBRTC_BIG_ENDIAN)
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
SND_PCM_FORMAT_S16_BE, //format
#else
SND_PCM_FORMAT_S16_LE, //format
@@ -1352,7 +1352,7 @@
_recChannels = 1;
if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
-#if defined(WEBRTC_BIG_ENDIAN)
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
SND_PCM_FORMAT_S16_BE, //format
#else
SND_PCM_FORMAT_S16_LE, //format
diff --git a/modules/audio_device/mac/audio_device_mac.cc b/modules/audio_device/mac/audio_device_mac.cc
index 9da1880..b07c94d 100644
--- a/modules/audio_device/mac/audio_device_mac.cc
+++ b/modules/audio_device/mac/audio_device_mac.cc
@@ -97,7 +97,7 @@
assert(msg != NULL);
assert(err != NULL);
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
// We need to flip the characters in this case.
@@ -1457,7 +1457,7 @@
_outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
| kLinearPCMFormatFlagIsPacked;
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
_outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
_outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
@@ -1681,7 +1681,7 @@
_inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
| kLinearPCMFormatFlagIsPacked;
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
_inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
_inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
diff --git a/modules/audio_device/mac/audio_mixer_manager_mac.cc b/modules/audio_device/mac/audio_mixer_manager_mac.cc
index 08e4197..952dc11 100644
--- a/modules/audio_device/mac/audio_mixer_manager_mac.cc
+++ b/modules/audio_device/mac/audio_mixer_manager_mac.cc
@@ -1154,7 +1154,7 @@
assert(msg != NULL);
assert(err != NULL);
-#ifdef WEBRTC_BIG_ENDIAN
+#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
// We need to flip the characters in this case.
diff --git a/modules/audio_processing/aec/aec_resampler.c b/modules/audio_processing/aec/aec_resampler.c
index ebd052f..5382665 100644
--- a/modules/audio_processing/aec/aec_resampler.c
+++ b/modules/audio_processing/aec/aec_resampler.c
@@ -9,8 +9,7 @@
*/
/* Resamples a signal to an arbitrary rate. Used by the AEC to compensate for
- * clock
- * skew by resampling the farend signal.
+ * clock skew by resampling the farend signal.
*/
#include "webrtc/modules/audio_processing/aec/aec_resampler.h"
diff --git a/modules/audio_processing/aecm/aecm_core.c b/modules/audio_processing/aecm/aecm_core.c
index a44ce08..2c0a40f 100644
--- a/modules/audio_processing/aecm/aecm_core.c
+++ b/modules/audio_processing/aecm/aecm_core.c
@@ -18,7 +18,7 @@
#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
-#include "webrtc/system_wrappers/interface/compile_assert.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
#include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index edf20bc..b33049e 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -638,7 +638,7 @@
if (size <= 0) {
return kUnspecifiedError;
}
-#if defined(WEBRTC_BIG_ENDIAN)
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
// TODO(ajm): Use little-endian "on the wire". For the moment, we can be
// pretty safe in assuming little-endian.
#endif
diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.c b/modules/audio_processing/utility/delay_estimator_wrapper.c
index c358f13..b72b8ff 100644
--- a/modules/audio_processing/utility/delay_estimator_wrapper.c
+++ b/modules/audio_processing/utility/delay_estimator_wrapper.c
@@ -16,7 +16,7 @@
#include "webrtc/modules/audio_processing/utility/delay_estimator.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_internal.h"
-#include "webrtc/system_wrappers/interface/compile_assert.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
// Only bit |kBandFirst| through bit |kBandLast| are processed and
// |kBandFirst| - |kBandLast| must be < 32.
diff --git a/modules/bitrate_controller/bitrate_controller_impl.cc b/modules/bitrate_controller/bitrate_controller_impl.cc
index 20cc3ac..11c36c0 100644
--- a/modules/bitrate_controller/bitrate_controller_impl.cc
+++ b/modules/bitrate_controller/bitrate_controller_impl.cc
@@ -11,6 +11,7 @@
#include "webrtc/modules/bitrate_controller/bitrate_controller_impl.h"
+#include <algorithm>
#include <utility>
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
@@ -69,13 +70,58 @@
owner_->OnReceivedRtcpReceiverReport(fraction_lost_aggregate, rtt,
total_number_of_packets, now_ms);
}
+
private:
std::map<uint32_t, uint32_t> ssrc_to_last_received_extended_high_seq_num_;
BitrateControllerImpl* owner_;
};
-BitrateController* BitrateController::CreateBitrateController() {
- return new BitrateControllerImpl();
+class BitrateControllerEnforceMinRate : public BitrateControllerImpl {
+ private:
+ void LowRateAllocation(uint32_t bitrate,
+ uint8_t fraction_loss,
+ uint32_t rtt,
+ uint32_t sum_min_bitrates) {
+ // Min bitrate to all observers.
+ BitrateObserverConfList::iterator it;
+ for (it = bitrate_observers_.begin(); it != bitrate_observers_.end();
+ ++it) {
+ it->first->OnNetworkChanged(it->second->min_bitrate_, fraction_loss,
+ rtt);
+ }
+ // Set sum of min to current send bitrate.
+ bandwidth_estimation_.SetSendBitrate(sum_min_bitrates);
+ }
+};
+
+class BitrateControllerNoEnforceMinRate : public BitrateControllerImpl {
+ private:
+ void LowRateAllocation(uint32_t bitrate,
+ uint8_t fraction_loss,
+ uint32_t rtt,
+ uint32_t sum_min_bitrates) {
+ // Allocate up to |min_bitrate_| to one observer at a time, until
+ // |bitrate| is depleted.
+ uint32_t remainder = bitrate;
+ BitrateObserverConfList::iterator it;
+ for (it = bitrate_observers_.begin(); it != bitrate_observers_.end();
+ ++it) {
+ uint32_t allocation = std::min(remainder, it->second->min_bitrate_);
+ it->first->OnNetworkChanged(allocation, fraction_loss, rtt);
+ remainder -= allocation;
+ }
+ // Set |bitrate| to current send bitrate.
+ bandwidth_estimation_.SetSendBitrate(bitrate);
+ }
+};
+
+BitrateController* BitrateController::CreateBitrateController(
+ bool enforce_min_bitrate) {
+ if (enforce_min_bitrate) {
+ return new BitrateControllerEnforceMinRate();
+ } else {
+ return new BitrateControllerNoEnforceMinRate();
+ }
}
BitrateControllerImpl::BitrateControllerImpl()
@@ -201,15 +247,7 @@
sum_min_bitrates += it->second->min_bitrate_;
}
if (bitrate <= sum_min_bitrates) {
- // Min bitrate to all observers.
- for (it = bitrate_observers_.begin(); it != bitrate_observers_.end();
- ++it) {
- it->first->OnNetworkChanged(it->second->min_bitrate_, fraction_loss,
- rtt);
- }
- // Set sum of min to current send bitrate.
- bandwidth_estimation_.SetSendBitrate(sum_min_bitrates);
- return;
+ return LowRateAllocation(bitrate, fraction_loss, rtt, sum_min_bitrates);
}
uint32_t bitrate_per_observer = (bitrate - sum_min_bitrates) /
number_of_observers;
@@ -248,4 +286,5 @@
bool BitrateControllerImpl::AvailableBandwidth(uint32_t* bandwidth) const {
return bandwidth_estimation_.AvailableBandwidth(bandwidth);
}
+
} // namespace webrtc
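
BitrateControllerImpl::OnNetworkChanged() now delegates the low-rate case to LowRateAllocation(), implemented by the two subclasses above. A minimal sketch of the non-enforcing strategy in isolation (a hypothetical free function, not part of this patch):

    #include <algorithm>
    #include <stdint.h>
    #include <vector>

    // Hands out each observer's minimum bitrate in turn until |bitrate| runs
    // out; later observers get whatever is left, possibly zero.
    std::vector<uint32_t> AllocateWithoutEnforcingMin(
        uint32_t bitrate, const std::vector<uint32_t>& min_bitrates) {
      std::vector<uint32_t> allocations;
      uint32_t remainder = bitrate;
      for (size_t i = 0; i < min_bitrates.size(); ++i) {
        uint32_t allocation = std::min(remainder, min_bitrates[i]);
        allocations.push_back(allocation);
        remainder -= allocation;
      }
      return allocations;
    }

    // AllocateWithoutEnforcingMin(500000, {100000, 200000, 300000}) yields
    // {100000, 200000, 200000}, matching the ThreeBitrateObservers
    // expectations in the unit test further down.
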
diff --git a/modules/bitrate_controller/bitrate_controller_impl.h b/modules/bitrate_controller/bitrate_controller_impl.h
index 4c891d9..5e56607 100644
--- a/modules/bitrate_controller/bitrate_controller_impl.h
+++ b/modules/bitrate_controller/bitrate_controller_impl.h
@@ -19,6 +19,7 @@
#include <list>
#include <map>
+#include <utility>
#include "webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
@@ -67,6 +68,9 @@
BitrateObserver* observer_;
uint32_t min_bitrate_;
};
+ typedef std::pair<BitrateObserver*, BitrateConfiguration*>
+ BitrateObserverConfiguration;
+ typedef std::list<BitrateObserverConfiguration> BitrateObserverConfList;
// Called by BitrateObserver's direct from the RTCP module.
void OnReceivedEstimatedBitrate(const uint32_t bitrate);
@@ -76,21 +80,24 @@
const int number_of_packets,
const uint32_t now_ms);
+ SendSideBandwidthEstimation bandwidth_estimation_;
+ BitrateObserverConfList bitrate_observers_;
+
private:
typedef std::multimap<uint32_t, ObserverConfiguration*> ObserverSortingMap;
- typedef std::pair<BitrateObserver*, BitrateConfiguration*>
- BitrateObserverConfiguration;
- typedef std::list<BitrateObserverConfiguration> BitrateObserverConfList;
BitrateObserverConfList::iterator
FindObserverConfigurationPair(const BitrateObserver* observer);
void OnNetworkChanged(const uint32_t bitrate,
const uint8_t fraction_loss, // 0 - 255.
const uint32_t rtt);
+ // Derived classes must implement this strategy method.
+ virtual void LowRateAllocation(uint32_t bitrate,
+ uint8_t fraction_loss,
+ uint32_t rtt,
+ uint32_t sum_min_bitrates) = 0;
CriticalSectionWrapper* critsect_;
- SendSideBandwidthEstimation bandwidth_estimation_;
- BitrateObserverConfList bitrate_observers_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_BITRATE_CONTROLLER_BITRATE_CONTROLLER_IMPL_H_
diff --git a/modules/bitrate_controller/bitrate_controller_unittest.cc b/modules/bitrate_controller/bitrate_controller_unittest.cc
index 7abe71b..30f85a8 100644
--- a/modules/bitrate_controller/bitrate_controller_unittest.cc
+++ b/modules/bitrate_controller/bitrate_controller_unittest.cc
@@ -57,12 +57,12 @@
class BitrateControllerTest : public ::testing::Test {
protected:
- BitrateControllerTest() {
- }
+ BitrateControllerTest() : enforce_min_bitrate_(true) {}
~BitrateControllerTest() {}
virtual void SetUp() {
- controller_ = BitrateController::CreateBitrateController();
+ controller_ =
+ BitrateController::CreateBitrateController(enforce_min_bitrate_);
bandwidth_observer_ = controller_->CreateRtcpBandwidthObserver();
}
@@ -70,6 +70,7 @@
delete bandwidth_observer_;
delete controller_;
}
+ bool enforce_min_bitrate_;
BitrateController* controller_;
RtcpBandwidthObserver* bandwidth_observer_;
};
@@ -414,3 +415,86 @@
controller_->RemoveBitrateObserver(&bitrate_observer_1);
controller_->RemoveBitrateObserver(&bitrate_observer_2);
}
+
+class BitrateControllerTestNoEnforceMin : public BitrateControllerTest {
+ protected:
+ BitrateControllerTestNoEnforceMin() : BitrateControllerTest() {
+ enforce_min_bitrate_ = false;
+ }
+};
+
+// The following three tests verify that the EnforceMinBitrate() method works
+// as intended.
+TEST_F(BitrateControllerTestNoEnforceMin, OneBitrateObserver) {
+ TestBitrateObserver bitrate_observer_1;
+ controller_->SetBitrateObserver(&bitrate_observer_1, 200000, 100000, 400000);
+
+ // High REMB.
+ bandwidth_observer_->OnReceivedEstimatedBitrate(150000);
+ EXPECT_EQ(150000u, bitrate_observer_1.last_bitrate_);
+
+ // Low REMB.
+ bandwidth_observer_->OnReceivedEstimatedBitrate(1000);
+ EXPECT_EQ(1000u, bitrate_observer_1.last_bitrate_);
+
+ controller_->RemoveBitrateObserver(&bitrate_observer_1);
+}
+
+TEST_F(BitrateControllerTestNoEnforceMin, ThreeBitrateObservers) {
+ TestBitrateObserver bitrate_observer_1;
+ TestBitrateObserver bitrate_observer_2;
+ TestBitrateObserver bitrate_observer_3;
+ // Set up the observers with min bitrates at 100000, 200000, and 300000.
+ // Note: The start bitrate of bitrate_observer_1 (700000) is used as the
+ // overall start bitrate.
+ controller_->SetBitrateObserver(&bitrate_observer_1, 700000, 100000, 400000);
+ controller_->SetBitrateObserver(&bitrate_observer_2, 200000, 200000, 400000);
+ controller_->SetBitrateObserver(&bitrate_observer_3, 200000, 300000, 400000);
+
+ // High REMB. Make sure the controllers get a fair share of the surplus
+ // (i.e., what is left after each controller gets its min rate).
+ bandwidth_observer_->OnReceivedEstimatedBitrate(690000);
+ // Verify that each observer gets its min rate (sum of min rates is 600000),
+ // and that the remaining 90000 is divided equally among the three.
+ EXPECT_EQ(130000u, bitrate_observer_1.last_bitrate_);
+ EXPECT_EQ(230000u, bitrate_observer_2.last_bitrate_);
+ EXPECT_EQ(330000u, bitrate_observer_3.last_bitrate_);
+
+ // High REMB, but below the sum of min bitrates.
+ bandwidth_observer_->OnReceivedEstimatedBitrate(500000);
+ // Verify that the first and second observers get their min bitrates, and the
+ // third gets the remainder.
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_); // Min bitrate.
+ EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_); // Min bitrate.
+ EXPECT_EQ(200000u, bitrate_observer_3.last_bitrate_); // Remainder.
+
+ // Low REMB.
+ bandwidth_observer_->OnReceivedEstimatedBitrate(1000);
+ // Verify that the first observer gets all the rate, and the rest get zero.
+ EXPECT_EQ(1000u, bitrate_observer_1.last_bitrate_);
+ EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_);
+ EXPECT_EQ(0u, bitrate_observer_3.last_bitrate_);
+
+ controller_->RemoveBitrateObserver(&bitrate_observer_1);
+ controller_->RemoveBitrateObserver(&bitrate_observer_2);
+ controller_->RemoveBitrateObserver(&bitrate_observer_3);
+}
+
+TEST_F(BitrateControllerTest, ThreeBitrateObserversLowRembEnforceMin) {
+ TestBitrateObserver bitrate_observer_1;
+ TestBitrateObserver bitrate_observer_2;
+ TestBitrateObserver bitrate_observer_3;
+ controller_->SetBitrateObserver(&bitrate_observer_1, 200000, 100000, 300000);
+ controller_->SetBitrateObserver(&bitrate_observer_2, 200000, 200000, 300000);
+ controller_->SetBitrateObserver(&bitrate_observer_3, 200000, 300000, 300000);
+
+ // Low REMB. Verify that all observers still get their respective min bitrate.
+ bandwidth_observer_->OnReceivedEstimatedBitrate(1000);
+ EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_); // Min cap.
+ EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_); // Min cap.
+ EXPECT_EQ(300000u, bitrate_observer_3.last_bitrate_); // Min cap.
+
+ controller_->RemoveBitrateObserver(&bitrate_observer_1);
+ controller_->RemoveBitrateObserver(&bitrate_observer_2);
+ controller_->RemoveBitrateObserver(&bitrate_observer_3);
+}
diff --git a/modules/bitrate_controller/include/bitrate_controller.h b/modules/bitrate_controller/include/bitrate_controller.h
index d74be16..ec03a14 100644
--- a/modules/bitrate_controller/include/bitrate_controller.h
+++ b/modules/bitrate_controller/include/bitrate_controller.h
@@ -43,7 +43,12 @@
* BitrateObservers.
*/
public:
- static BitrateController* CreateBitrateController();
+ // The argument |enforce_min_bitrate| controls the behavior when the available
+ // bitrate is lower than the minimum bitrate, or the sum of minimum bitrates.
+ // When true, the bitrate will never be set lower than the minimum bitrate(s).
+ // When false, the bitrate observers will be allocated rates up to their
+ // respective minimum bitrate, satisfying one observer after the other.
+ static BitrateController* CreateBitrateController(bool enforce_min_bitrate);
virtual ~BitrateController() {}
virtual RtcpBandwidthObserver* CreateRtcpBandwidthObserver() = 0;
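
A hypothetical call site for the new factory argument (the surrounding setup is assumed, not taken from this patch):

    #include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"

    void CreateControllers() {
      // true: observers are never pushed below their configured minimum
      // bitrates (the previous, strict behavior).
      webrtc::BitrateController* strict =
          webrtc::BitrateController::CreateBitrateController(true);

      // false: when the estimate cannot cover the sum of minimums, observers
      // are filled up to their minimum one after the other instead.
      webrtc::BitrateController* lenient =
          webrtc::BitrateController::CreateBitrateController(false);

      delete strict;
      delete lenient;
    }
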
diff --git a/modules/desktop_capture/desktop_frame.cc b/modules/desktop_capture/desktop_frame.cc
index 90e1fbd..f293baf 100644
--- a/modules/desktop_capture/desktop_frame.cc
+++ b/modules/desktop_capture/desktop_frame.cc
@@ -10,6 +10,8 @@
#include "webrtc/modules/desktop_capture/desktop_frame.h"
+#include <string.h>
+
namespace webrtc {
DesktopFrame::DesktopFrame(DesktopSize size,
@@ -35,6 +37,20 @@
delete[] data_;
}
+DesktopFrame* BasicDesktopFrame::CopyOf(const DesktopFrame& frame) {
+ DesktopFrame* result = new BasicDesktopFrame(frame.size());
+ for (int y = 0; y < frame.size().height(); ++y) {
+ memcpy(result->data() + y * result->stride(),
+ frame.data() + y * frame.stride(),
+ frame.size().width() * kBytesPerPixel);
+ }
+ result->set_dpi(frame.dpi());
+ result->set_capture_time_ms(frame.capture_time_ms());
+ *result->mutable_updated_region() = frame.updated_region();
+ return result;
+}
+
+
SharedMemoryDesktopFrame::SharedMemoryDesktopFrame(
DesktopSize size,
int stride,
diff --git a/modules/desktop_capture/desktop_frame.h b/modules/desktop_capture/desktop_frame.h
index a39eff7..b420a3c 100644
--- a/modules/desktop_capture/desktop_frame.h
+++ b/modules/desktop_capture/desktop_frame.h
@@ -83,6 +83,9 @@
explicit BasicDesktopFrame(DesktopSize size);
virtual ~BasicDesktopFrame();
+ // Creates a BasicDesktopFrame that contains a copy of |frame|.
+ static DesktopFrame* CopyOf(const DesktopFrame& frame);
+
private:
DISALLOW_COPY_AND_ASSIGN(BasicDesktopFrame);
};
diff --git a/modules/desktop_capture/mouse_cursor.cc b/modules/desktop_capture/mouse_cursor.cc
index 67eb4bf..3f1ab3d 100644
--- a/modules/desktop_capture/mouse_cursor.cc
+++ b/modules/desktop_capture/mouse_cursor.cc
@@ -23,4 +23,10 @@
MouseCursor::~MouseCursor() {}
+// static
+MouseCursor* MouseCursor::CopyOf(const MouseCursor& cursor) {
+ return new MouseCursor(BasicDesktopFrame::CopyOf(cursor.image()),
+ cursor.hotspot());
+}
+
} // namespace webrtc
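
A short illustration of the two new deep-copy helpers (the wrapper function names are made up):

    #include "webrtc/modules/desktop_capture/desktop_frame.h"
    #include "webrtc/modules/desktop_capture/mouse_cursor.h"

    // Deep-copies pixel data row by row and carries over DPI, capture time and
    // the updated region.
    webrtc::DesktopFrame* SnapshotFrame(const webrtc::DesktopFrame& frame) {
      return webrtc::BasicDesktopFrame::CopyOf(frame);
    }

    // Deep-copies the cursor image via BasicDesktopFrame::CopyOf() and keeps
    // the hotspot.
    webrtc::MouseCursor* SnapshotCursor(const webrtc::MouseCursor& cursor) {
      return webrtc::MouseCursor::CopyOf(cursor);
    }
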
diff --git a/modules/desktop_capture/mouse_cursor.h b/modules/desktop_capture/mouse_cursor.h
index f37eeb3..4cf7708 100644
--- a/modules/desktop_capture/mouse_cursor.h
+++ b/modules/desktop_capture/mouse_cursor.h
@@ -25,8 +25,10 @@
MouseCursor(DesktopFrame* image, const DesktopVector& hotspot);
~MouseCursor();
- const DesktopFrame& image() { return *image_; }
- const DesktopVector& hotspot() { return hotspot_; }
+ static MouseCursor* CopyOf(const MouseCursor& cursor);
+
+ const DesktopFrame& image() const { return *image_; }
+ const DesktopVector& hotspot() const { return hotspot_; }
private:
scoped_ptr<DesktopFrame> image_;
diff --git a/modules/desktop_capture/mouse_cursor_monitor_mac.mm b/modules/desktop_capture/mouse_cursor_monitor_mac.mm
index f742dfa..6f9380a 100644
--- a/modules/desktop_capture/mouse_cursor_monitor_mac.mm
+++ b/modules/desktop_capture/mouse_cursor_monitor_mac.mm
@@ -10,19 +10,209 @@
#include "webrtc/modules/desktop_capture/mouse_cursor_monitor.h"
-#include <cstddef>
+#include <assert.h>
+#include <ApplicationServices/ApplicationServices.h>
+#include <Cocoa/Cocoa.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
+#include "webrtc/modules/desktop_capture/mouse_cursor.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
-// TODO(sergeyu): Implement MouseCursorMonitor for Mac.
+class MouseCursorMonitorMac : public MouseCursorMonitor {
+ public:
+ MouseCursorMonitorMac(CGWindowID window_id);
+ virtual ~MouseCursorMonitorMac();
+
+ virtual void Init(Callback* callback, Mode mode) OVERRIDE;
+ virtual void Capture() OVERRIDE;
+
+ private:
+ void CaptureImage();
+
+ CGWindowID window_id_;
+
+ Callback* callback_;
+ Mode mode_;
+
+ scoped_ptr<MouseCursor> last_cursor_;
+};
+
+MouseCursorMonitorMac::MouseCursorMonitorMac(CGWindowID window_id)
+ : window_id_(window_id),
+ callback_(NULL),
+ mode_(SHAPE_AND_POSITION) {
+}
+
+MouseCursorMonitorMac::~MouseCursorMonitorMac() {}
+
+void MouseCursorMonitorMac::Init(Callback* callback, Mode mode) {
+ assert(!callback_);
+ assert(callback);
+
+ callback_ = callback;
+ mode_ = mode;
+}
+
+void MouseCursorMonitorMac::Capture() {
+ assert(callback_);
+
+ CaptureImage();
+
+ if (mode_ != SHAPE_AND_POSITION)
+ return;
+
+ CursorState state = INSIDE;
+
+ CGEventRef event = CGEventCreate(NULL);
+ CGPoint gc_position = CGEventGetLocation(event);
+ CFRelease(event);
+
+ DesktopVector position(gc_position.x, gc_position.y);
+
+ // If we are capturing the cursor for a specific window, we need to figure
+ // out whether the current mouse position is covered by another window and
+ // also adjust |position| to make it relative to the window origin.
+ if (window_id_ != kCGNullWindowID) {
+ // Get list of windows that may be covering parts of |window_id_|.
+ // CGWindowListCopyWindowInfo() returns windows in order from front to back,
+ // so |window_id_| is expected to be the last in the list.
+ CFArrayRef window_array =
+ CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly |
+ kCGWindowListOptionOnScreenAboveWindow |
+ kCGWindowListOptionIncludingWindow,
+ window_id_);
+ bool found_window = false;
+ if (window_array) {
+ CFIndex count = CFArrayGetCount(window_array);
+ for (CFIndex i = 0; i < count; ++i) {
+ CFDictionaryRef window = reinterpret_cast<CFDictionaryRef>(
+ CFArrayGetValueAtIndex(window_array, i));
+
+ // Skip the Dock window. Dock window covers the whole screen, but it is
+ // transparent.
+ CFStringRef window_name = reinterpret_cast<CFStringRef>(
+ CFDictionaryGetValue(window, kCGWindowName));
+ if (window_name && CFStringCompare(window_name, CFSTR("Dock"), 0) == 0)
+ continue;
+
+ CFDictionaryRef window_bounds = reinterpret_cast<CFDictionaryRef>(
+ CFDictionaryGetValue(window, kCGWindowBounds));
+ CFNumberRef window_number = reinterpret_cast<CFNumberRef>(
+ CFDictionaryGetValue(window, kCGWindowNumber));
+
+ if (window_bounds && window_number) {
+ CGRect gc_window_rect;
+ if (!CGRectMakeWithDictionaryRepresentation(window_bounds,
+ &gc_window_rect)) {
+ continue;
+ }
+ DesktopRect window_rect =
+ DesktopRect::MakeXYWH(gc_window_rect.origin.x,
+ gc_window_rect.origin.y,
+ gc_window_rect.size.width,
+ gc_window_rect.size.height);
+
+ CGWindowID window_id;
+ if (!CFNumberGetValue(window_number, kCFNumberIntType, &window_id))
+ continue;
+
+ if (window_id == window_id_) {
+ found_window = true;
+ if (!window_rect.Contains(position))
+ state = OUTSIDE;
+ position = position.subtract(window_rect.top_left());
+
+ assert(i == count - 1);
+ break;
+ } else if (window_rect.Contains(position)) {
+ state = OUTSIDE;
+ position.set(-1, -1);
+ break;
+ }
+ }
+ }
+
+ CFRelease(window_array);
+ }
+
+ if (!found_window) {
+ // If we failed to get the list of windows, or the window wasn't in the
+ // list, pretend that the cursor is outside the window. This can happen,
+ // e.g., if the window was closed.
+ state = OUTSIDE;
+ position.set(-1, -1);
+ }
+ }
+
+ callback_->OnMouseCursorPosition(state, position);
+}
+
+void MouseCursorMonitorMac::CaptureImage() {
+ NSCursor* nscursor = [NSCursor currentSystemCursor];
+
+ NSImage* nsimage = [nscursor image];
+ NSSize nssize = [nsimage size];
+ DesktopSize size(nssize.width, nssize.height);
+ NSPoint nshotspot = [nscursor hotSpot];
+ DesktopVector hotspot(
+ std::max(0, std::min(size.width(), static_cast<int>(nshotspot.x))),
+ std::max(0, std::min(size.height(), static_cast<int>(nshotspot.y))));
+ CGImageRef cg_image =
+ [nsimage CGImageForProposedRect:NULL context:nil hints:nil];
+ if (!cg_image)
+ return;
+
+ if (CGImageGetBitsPerPixel(cg_image) != DesktopFrame::kBytesPerPixel * 8 ||
+ CGImageGetBytesPerRow(cg_image) !=
+ static_cast<size_t>(DesktopFrame::kBytesPerPixel * size.width()) ||
+ CGImageGetBitsPerComponent(cg_image) != 8) {
+ return;
+ }
+
+ CGDataProviderRef provider = CGImageGetDataProvider(cg_image);
+ CFDataRef image_data_ref = CGDataProviderCopyData(provider);
+ if (image_data_ref == NULL)
+ return;
+
+ const uint8_t* src_data =
+ reinterpret_cast<const uint8_t*>(CFDataGetBytePtr(image_data_ref));
+
+ // Compare the cursor with the previous one.
+ if (last_cursor_.get() &&
+ last_cursor_->image().size().equals(size) &&
+ last_cursor_->hotspot().equals(hotspot) &&
+ memcmp(last_cursor_->image().data(), src_data,
+ last_cursor_->image().stride() * size.height()) == 0) {
+ return;
+ }
+
+ // Create a MouseCursor that describes the cursor and pass it to
+ // the client.
+ scoped_ptr<DesktopFrame> image(
+ new BasicDesktopFrame(DesktopSize(size.width(), size.height())));
+ memcpy(image->data(), src_data,
+ size.width() * size.height() * DesktopFrame::kBytesPerPixel);
+
+ CFRelease(image_data_ref);
+
+ scoped_ptr<MouseCursor> cursor(new MouseCursor(image.release(), hotspot));
+ last_cursor_.reset(MouseCursor::CopyOf(*cursor));
+
+ callback_->OnMouseCursor(cursor.release());
+}
+
+
MouseCursorMonitor* MouseCursorMonitor::CreateForWindow(
const DesktopCaptureOptions& options, WindowId window) {
- return NULL;
+ return new MouseCursorMonitorMac(window);
}
MouseCursorMonitor* MouseCursorMonitor::CreateForScreen(
const DesktopCaptureOptions& options) {
- return NULL;
+ return new MouseCursorMonitorMac(kCGNullWindowID);
}
} // namespace webrtc
diff --git a/modules/desktop_capture/mouse_cursor_monitor_unittest.cc b/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
index bbd5be4..18bf1ca 100644
--- a/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
+++ b/modules/desktop_capture/mouse_cursor_monitor_unittest.cc
@@ -46,8 +46,11 @@
bool position_received_;
};
-// TODO(sergeyu): Enable tests on all platforms.
-#if defined(USE_X11)
+// TODO(sergeyu): On Mac we need to initialize NSApplication before running the
+// tests. Figure out how to do that without breaking other tests in
+// modules_unittests and enable these tests on Mac.
+// https://code.google.com/p/webrtc/issues/detail?id=2532
+#if !defined(WEBRTC_MAC)
#define MAYBE(x) x
#else
#define MAYBE(x) DISABLED_##x
diff --git a/modules/desktop_capture/mouse_cursor_monitor_win.cc b/modules/desktop_capture/mouse_cursor_monitor_win.cc
index 907129b..82f7d24 100644
--- a/modules/desktop_capture/mouse_cursor_monitor_win.cc
+++ b/modules/desktop_capture/mouse_cursor_monitor_win.cc
@@ -10,19 +10,103 @@
#include "webrtc/modules/desktop_capture/mouse_cursor_monitor.h"
-#include <cstddef>
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
+#include "webrtc/modules/desktop_capture/mouse_cursor.h"
+#include "webrtc/modules/desktop_capture/win/cursor.h"
+#include "webrtc/system_wrappers/interface/logging.h"
namespace webrtc {
-// TODO(sergeyu): Implement MouseCursorMonitor for Windows.
+class MouseCursorMonitorWin : public MouseCursorMonitor {
+ public:
+ explicit MouseCursorMonitorWin(HWND window);
+ virtual ~MouseCursorMonitorWin();
+
+ virtual void Init(Callback* callback, Mode mode) OVERRIDE;
+ virtual void Capture() OVERRIDE;
+
+ private:
+ HWND window_;
+
+ Callback* callback_;
+ Mode mode_;
+
+ HDC desktop_dc_;
+
+ HCURSOR last_cursor_;
+};
+
+MouseCursorMonitorWin::MouseCursorMonitorWin(HWND window)
+ : window_(window),
+ callback_(NULL),
+ mode_(SHAPE_AND_POSITION),
+ desktop_dc_(NULL),
+ last_cursor_(NULL) {
+}
+
+MouseCursorMonitorWin::~MouseCursorMonitorWin() {
+ if (desktop_dc_)
+ ReleaseDC(NULL, desktop_dc_);
+}
+
+void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) {
+ assert(!callback_);
+ assert(callback);
+
+ callback_ = callback;
+ mode_ = mode;
+
+ desktop_dc_ = GetDC(NULL);
+}
+
+void MouseCursorMonitorWin::Capture() {
+ assert(callback_);
+
+ CURSORINFO cursor_info;
+ cursor_info.cbSize = sizeof(CURSORINFO);
+ if (!GetCursorInfo(&cursor_info)) {
+ LOG_F(LS_ERROR) << "Unable to get cursor info. Error = " << GetLastError();
+ return;
+ }
+
+ if (last_cursor_ != cursor_info.hCursor) {
+ last_cursor_ = cursor_info.hCursor;
+ // Note that |cursor_info.hCursor| does not need to be freed.
+ scoped_ptr<MouseCursor> cursor(
+ CreateMouseCursorFromHCursor(desktop_dc_, cursor_info.hCursor));
+ if (cursor.get())
+ callback_->OnMouseCursor(cursor.release());
+ }
+
+ if (mode_ != SHAPE_AND_POSITION)
+ return;
+
+ DesktopVector position(cursor_info.ptScreenPos.x, cursor_info.ptScreenPos.y);
+ bool inside = cursor_info.flags == CURSOR_SHOWING;
+
+ if (window_) {
+ RECT rect;
+ if (!GetWindowRect(window_, &rect)) {
+ position.set(0, 0);
+ inside = false;
+ } else {
+ position = position.subtract(DesktopVector(rect.left, rect.top));
+ if (inside)
+ inside = (window_ == WindowFromPoint(cursor_info.ptScreenPos));
+ }
+ }
+
+ callback_->OnMouseCursorPosition(inside ? INSIDE : OUTSIDE, position);
+}
+
MouseCursorMonitor* MouseCursorMonitor::CreateForWindow(
const DesktopCaptureOptions& options, WindowId window) {
- return NULL;
+ return new MouseCursorMonitorWin(reinterpret_cast<HWND>(window));
}
MouseCursorMonitor* MouseCursorMonitor::CreateForScreen(
const DesktopCaptureOptions& options) {
- return NULL;
+ return new MouseCursorMonitorWin(NULL);
}
} // namespace webrtc
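
A sketch of how a client might drive the new Windows and Mac monitors. The Callback method signatures are inferred from the call sites above; how the monitor instance is created and scheduled is assumed to come from the embedding capturer setup.

    #include "webrtc/modules/desktop_capture/mouse_cursor.h"
    #include "webrtc/modules/desktop_capture/mouse_cursor_monitor.h"

    class CursorLogger : public webrtc::MouseCursorMonitor::Callback {
     public:
      CursorLogger() : last_cursor_(NULL) {}
      virtual ~CursorLogger() { delete last_cursor_; }

      // Called only when the shape changes; takes ownership of |cursor|.
      virtual void OnMouseCursor(webrtc::MouseCursor* cursor) {
        delete last_cursor_;
        last_cursor_ = cursor;
      }

      // INSIDE/OUTSIDE plus screen- or window-relative coordinates.
      virtual void OnMouseCursorPosition(
          webrtc::MouseCursorMonitor::CursorState state,
          const webrtc::DesktopVector& position) {}

     private:
      webrtc::MouseCursor* last_cursor_;
    };

    void InitAndCaptureOnce(webrtc::MouseCursorMonitor* monitor,
                            CursorLogger* logger) {
      monitor->Init(logger, webrtc::MouseCursorMonitor::SHAPE_AND_POSITION);
      monitor->Capture();  // Call Capture() again for every subsequent frame.
    }
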
diff --git a/modules/desktop_capture/screen_capturer_win.cc b/modules/desktop_capture/screen_capturer_win.cc
index af9b18b..42a7192 100644
--- a/modules/desktop_capture/screen_capturer_win.cc
+++ b/modules/desktop_capture/screen_capturer_win.cc
@@ -17,6 +17,7 @@
#include "webrtc/modules/desktop_capture/desktop_frame_win.h"
#include "webrtc/modules/desktop_capture/desktop_region.h"
#include "webrtc/modules/desktop_capture/differ.h"
+#include "webrtc/modules/desktop_capture/mouse_cursor.h"
#include "webrtc/modules/desktop_capture/mouse_cursor_shape.h"
#include "webrtc/modules/desktop_capture/screen_capture_frame_queue.h"
#include "webrtc/modules/desktop_capture/screen_capturer_helper.h"
@@ -328,11 +329,19 @@
}
// Note that |cursor_info.hCursor| does not need to be freed.
- scoped_ptr<MouseCursorShape> cursor(
- CreateMouseCursorShapeFromCursor(desktop_dc_, cursor_info.hCursor));
- if (!cursor.get())
+ scoped_ptr<MouseCursor> cursor_image(
+ CreateMouseCursorFromHCursor(desktop_dc_, cursor_info.hCursor));
+ if (!cursor_image.get())
return;
+ scoped_ptr<MouseCursorShape> cursor(new MouseCursorShape);
+ cursor->hotspot = cursor_image->hotspot();
+ cursor->size = cursor_image->image().size();
+ cursor->data.assign(
+ cursor_image->image().data(),
+ cursor_image->image().data() +
+ cursor_image->image().stride() * DesktopFrame::kBytesPerPixel);
+
// Compare the current cursor with the last one we sent to the client. If
// they're the same, then don't bother sending the cursor again.
if (last_cursor_.size.equals(cursor->size) &&
diff --git a/modules/desktop_capture/win/cursor.cc b/modules/desktop_capture/win/cursor.cc
index 76eed77..11bb2db 100644
--- a/modules/desktop_capture/win/cursor.cc
+++ b/modules/desktop_capture/win/cursor.cc
@@ -15,6 +15,7 @@
#include "webrtc/modules/desktop_capture/win/scoped_gdi_object.h"
#include "webrtc/modules/desktop_capture/desktop_frame.h"
#include "webrtc/modules/desktop_capture/desktop_geometry.h"
+#include "webrtc/modules/desktop_capture/mouse_cursor.h"
#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
@@ -77,7 +78,8 @@
// Premultiplies RGB components of the pixel data in the given image by
// the corresponding alpha components.
void AlphaMul(uint32_t* data, int width, int height) {
- COMPILE_ASSERT(sizeof(uint32_t) == kBytesPerPixel);
+ COMPILE_ASSERT(sizeof(uint32_t) == kBytesPerPixel,
+ size_of_uint32_should_be_the_bytes_per_pixel);
for (uint32_t* data_end = data + width * height; data != data_end; ++data) {
RGBQUAD* from = reinterpret_cast<RGBQUAD*>(data);
@@ -92,29 +94,24 @@
}
// Scans a 32bpp bitmap looking for any pixels with non-zero alpha component.
-// |*has_alpha| is set to true if non-zero alpha is found. |stride| is expressed
-// in pixels.
-bool HasAlphaChannel(const uint32_t* data, int stride, int width, int height,
- bool* has_alpha) {
+// Returns true if non-zero alpha is found. |stride| is expressed in pixels.
+bool HasAlphaChannel(const uint32_t* data, int stride, int width, int height) {
const RGBQUAD* plane = reinterpret_cast<const RGBQUAD*>(data);
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
- if (plane->rgbReserved != 0) {
- *has_alpha = true;
+ if (plane->rgbReserved != 0)
return true;
- }
plane += 1;
}
plane += stride - width;
}
- *has_alpha = false;
- return true;
+ return false;
}
} // namespace
-MouseCursorShape* CreateMouseCursorShapeFromCursor(HDC dc, HCURSOR cursor) {
+MouseCursor* CreateMouseCursorFromHCursor(HDC dc, HCURSOR cursor) {
ICONINFO iinfo;
if (!GetIconInfo(cursor, &iinfo)) {
LOG_F(LS_ERROR) << "Unable to get cursor icon info. Error = "
@@ -167,20 +164,18 @@
}
uint32_t* mask_plane = mask_data.get();
-
- scoped_array<uint32_t> color_data;
- uint32_t* color_plane = NULL;
- int color_stride = 0;
+ scoped_ptr<DesktopFrame> image(
+ new BasicDesktopFrame(DesktopSize(width, height)));
bool has_alpha = false;
if (is_color) {
+ image.reset(new BasicDesktopFrame(DesktopSize(width, height)));
// Get the pixels from the color bitmap.
- color_data.reset(new uint32_t[width * height]);
if (!GetDIBits(dc,
scoped_color,
0,
height,
- color_data.get(),
+ image->data(),
reinterpret_cast<BITMAPINFO*>(&bmi),
DIB_RGB_COLORS)) {
LOG_F(LS_ERROR) << "Unable to get bitmap bits. Error = "
@@ -188,30 +183,28 @@
return NULL;
}
- color_plane = color_data.get();
- color_stride = width;
-
// GetDIBits() does not provide any indication whether the bitmap has alpha
// channel, so we use HasAlphaChannel() below to find it out.
- if (!HasAlphaChannel(color_plane, color_stride, width, height, &has_alpha))
- return NULL;
+ has_alpha = HasAlphaChannel(reinterpret_cast<uint32_t*>(image->data()),
+ width, width, height);
} else {
// For non-color cursors, the mask contains both an AND and an XOR mask and
// the height includes both. Thus, the width is correct, but we need to
// divide by 2 to get the correct mask height.
height /= 2;
+ image.reset(new BasicDesktopFrame(DesktopSize(width, height)));
+
// The XOR mask becomes the color bitmap.
- color_plane = mask_plane + (width * height);
- color_stride = width;
+ memcpy(
+ image->data(), mask_plane + (width * height), image->stride() * width);
}
// Reconstruct transparency from the mask if the color image does not has
// alpha channel.
if (!has_alpha) {
bool add_outline = false;
- uint32_t* color = color_plane;
- uint32_t* dst = color_plane;
+ uint32_t* dst = reinterpret_cast<uint32_t*>(image->data());
uint32_t* mask = mask_plane;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
@@ -226,36 +219,32 @@
// with black. In this case, we also add an outline around the cursor
// so that it is visible against a dark background.
if (*mask == kPixelRgbWhite) {
- if (*color != 0) {
+ if (*dst != 0) {
add_outline = true;
*dst = kPixelRgbaBlack;
} else {
*dst = kPixelRgbaTransparent;
}
} else {
- *dst = kPixelRgbaBlack ^ *color;
+ *dst = kPixelRgbaBlack ^ *dst;
}
- ++color;
++dst;
++mask;
}
}
if (add_outline) {
- AddCursorOutline(width, height, color_plane);
+ AddCursorOutline(
+ width, height, reinterpret_cast<uint32_t*>(image->data()));
}
}
- // Pre-multiply the resulting pixels since MouseCursorShape uses premultiplied
+ // Pre-multiply the resulting pixels since MouseCursor uses premultiplied
// images.
- AlphaMul(color_plane, width, height);
+ AlphaMul(reinterpret_cast<uint32_t*>(image->data()), width, height);
- scoped_ptr<MouseCursorShape> result(new MouseCursorShape());
- result->data.assign(reinterpret_cast<char*>(color_plane),
- height * width * kBytesPerPixel);
- result->size.set(width, height);
- result->hotspot.set(hotspot_x, hotspot_y);
- return result.release();
+ return new MouseCursor(
+ image.release(), DesktopVector(hotspot_x, hotspot_y));
}
} // namespace webrtc
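
COMPILE_ASSERT now takes an explicit message identifier as its second argument. For illustration only, a typical way such a macro is implemented (a generic sketch, not the definition in system_wrappers/interface/compile_assert.h):

    // The message doubles as a typedef name, so it must be a valid identifier;
    // when |expr| is false the array size is negative and compilation fails.
    #define MY_COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]

    MY_COMPILE_ASSERT(sizeof(int) >= 4, int_is_at_least_32_bits);
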
diff --git a/modules/desktop_capture/win/cursor.h b/modules/desktop_capture/win/cursor.h
index 08a6c4a..d521cc0 100644
--- a/modules/desktop_capture/win/cursor.h
+++ b/modules/desktop_capture/win/cursor.h
@@ -13,13 +13,12 @@
#include <windows.h>
-#include "webrtc/modules/desktop_capture/mouse_cursor_shape.h"
-
namespace webrtc {
-// Converts a cursor into a |MouseCursorShape| instance.
-MouseCursorShape* CreateMouseCursorShapeFromCursor(
- HDC dc, HCURSOR cursor);
+class MouseCursor;
+
+// Converts an HCURSOR into a |MouseCursor| instance.
+MouseCursor* CreateMouseCursorFromHCursor(HDC dc, HCURSOR cursor);
} // namespace webrtc
diff --git a/modules/desktop_capture/win/cursor_unittest.cc b/modules/desktop_capture/win/cursor_unittest.cc
index c1c7417..f590bd2 100644
--- a/modules/desktop_capture/win/cursor_unittest.cc
+++ b/modules/desktop_capture/win/cursor_unittest.cc
@@ -9,7 +9,9 @@
*/
#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/desktop_capture/desktop_frame.h"
#include "webrtc/modules/desktop_capture/desktop_geometry.h"
+#include "webrtc/modules/desktop_capture/mouse_cursor.h"
#include "webrtc/modules/desktop_capture/win/cursor.h"
#include "webrtc/modules/desktop_capture/win/cursor_unittest_resources.h"
#include "webrtc/modules/desktop_capture/win/scoped_gdi_object.h"
@@ -19,9 +21,9 @@
namespace {
-// Loads |left| from resources, converts it to a |MouseCursorShape| instance
-// and compares pixels with |right|. Returns true of MouseCursorShape bits
-// match |right|. |right| must be a 32bpp cursor with alpha channel.
+// Loads |left| from resources, converts it to a |MouseCursor| instance and
+// compares pixels with |right|. Returns true if MouseCursor bits match |right|.
+// |right| must be a 32bpp cursor with alpha channel.
bool ConvertToMouseShapeAndCompare(unsigned left, unsigned right) {
HMODULE instance = GetModuleHandle(NULL);
@@ -32,8 +34,8 @@
// Convert |cursor| to |mouse_shape|.
HDC dc = GetDC(NULL);
- scoped_ptr<MouseCursorShape> mouse_shape(
- CreateMouseCursorShapeFromCursor(dc, cursor));
+ scoped_ptr<MouseCursor> mouse_shape(
+ CreateMouseCursorFromHCursor(dc, cursor));
ReleaseDC(NULL, dc);
EXPECT_TRUE(mouse_shape.get());
@@ -56,7 +58,7 @@
int width = bitmap_info.bmWidth;
int height = bitmap_info.bmHeight;
- EXPECT_TRUE(DesktopSize(width, height).equals(mouse_shape->size));
+ EXPECT_TRUE(DesktopSize(width, height).equals(mouse_shape->image().size()));
// Get the pixels from |scoped_color|.
int size = width * height;
@@ -64,13 +66,13 @@
EXPECT_TRUE(GetBitmapBits(scoped_color, size * sizeof(uint32_t), data.get()));
// Compare the 32bpp image in |mouse_shape| with the one loaded from |right|.
- return memcmp(data.get(), mouse_shape->data.data(),
+ return memcmp(data.get(), mouse_shape->image().data(),
size * sizeof(uint32_t)) == 0;
}
} // namespace
-TEST(MouseCursorShapeTest, MatchCursors) {
+TEST(MouseCursorTest, MatchCursors) {
EXPECT_TRUE(ConvertToMouseShapeAndCompare(IDD_CURSOR1_24BPP,
IDD_CURSOR1_32BPP));
diff --git a/modules/desktop_capture/window_capturer_mac.cc b/modules/desktop_capture/window_capturer_mac.cc
index 78f618d..6268fc0 100755
--- a/modules/desktop_capture/window_capturer_mac.cc
+++ b/modules/desktop_capture/window_capturer_mac.cc
@@ -41,29 +41,6 @@
return true;
}
-// DesktopFrame that stores data in CFData.
-class CFDataDesktopFrame : public DesktopFrame {
- public:
- // Consumes |cf_data| reference.
- //
- // TODO(sergeyu): Here we const_cast<> the buffer used in CFDataRef. CFDataRef
- // buffer is immutable, but DesktopFrame is always mutable. This shouldn't be
- // a problem because frames generated by WindowCapturers are normally not
- // mutated. To avoid this hack consider making DesktopFrame immutable and add
- // MutableDesktopFrame.
- CFDataDesktopFrame(DesktopSize size, int stride, CFDataRef cf_data)
- : DesktopFrame(size, stride,
- const_cast<uint8_t*>(CFDataGetBytePtr(cf_data)), NULL),
- cf_data_(cf_data) {
- }
- virtual ~CFDataDesktopFrame() {
- CFRelease(cf_data_);
- }
-
- private:
- CFDataRef cf_data_;
-};
-
class WindowCapturerMac : public WindowCapturer {
public:
WindowCapturerMac();
@@ -185,9 +162,18 @@
int width = CGImageGetWidth(window_image);
int height = CGImageGetHeight(window_image);
CGDataProviderRef provider = CGImageGetDataProvider(window_image);
- DesktopFrame* frame = new CFDataDesktopFrame(
- DesktopSize(width, height), CGImageGetBytesPerRow(window_image),
- CGDataProviderCopyData(provider));
+ CFDataRef cf_data = CGDataProviderCopyData(provider);
+ DesktopFrame* frame = new BasicDesktopFrame(
+ DesktopSize(width, height));
+
+ int src_stride = CGImageGetBytesPerRow(window_image);
+ const uint8_t* src_data = CFDataGetBytePtr(cf_data);
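+  // Copy the image row by row, since the source stride may be larger than
+  // width * kBytesPerPixel due to padding.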
+ for (int y = 0; y < height; ++y) {
+ memcpy(frame->data() + frame->stride() * y, src_data + src_stride * y,
+ DesktopFrame::kBytesPerPixel * width);
+ }
+
+ CFRelease(cf_data);
CFRelease(window_image);
callback_->OnCaptureCompleted(frame);
diff --git a/modules/modules.gyp b/modules/modules.gyp
index 5d80d31..6c7912d 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -174,9 +174,13 @@
'pacing/paced_sender_unittest.cc',
'remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h',
'remote_bitrate_estimator/bitrate_estimator_unittest.cc',
+ 'remote_bitrate_estimator/bwe_test_framework.cc',
+ 'remote_bitrate_estimator/bwe_test_framework.h',
+ 'remote_bitrate_estimator/bwe_test_framework_unittest.cc',
'remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc',
'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc',
'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h',
+ 'remote_bitrate_estimator/remote_bitrate_estimators_test.cc',
'remote_bitrate_estimator/rtp_to_ntp_unittest.cc',
'rtp_rtcp/source/mock/mock_rtp_payload_strategy.h',
'rtp_rtcp/source/fec_receiver_unittest.cc',
diff --git a/modules/remote_bitrate_estimator/bwe_test_framework.cc b/modules/remote_bitrate_estimator/bwe_test_framework.cc
new file mode 100644
index 0000000..6a3b01d
--- /dev/null
+++ b/modules/remote_bitrate_estimator/bwe_test_framework.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/remote_bitrate_estimator/bwe_test_framework.h"
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+bool IsTimeSorted(const Packets& packets) {
+ PacketsConstIt last_it = packets.begin();
+ for (PacketsConstIt it = last_it; it != packets.end(); ++it) {
+ if (it != last_it && *it < *last_it) {
+ return false;
+ }
+ last_it = it;
+ }
+ return true;
+}
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
diff --git a/modules/remote_bitrate_estimator/bwe_test_framework.h b/modules/remote_bitrate_estimator/bwe_test_framework.h
new file mode 100644
index 0000000..1029db9
--- /dev/null
+++ b/modules/remote_bitrate_estimator/bwe_test_framework.h
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_BWE_TEST_FRAMEWORK_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_BWE_TEST_FRAMEWORK_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <list>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/system_wrappers/interface/constructor_magic.h"
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+class Random {
+ public:
+ explicit Random(uint32_t seed)
+ : a_(0x531FDB97 ^ seed),
+ b_(0x6420ECA8 + seed) {
+ }
+
+ // Return semi-random number in the interval [0.0, 1.0].
+ float Rand() {
+ const float kScale = 1.0f / 0xffffffff;
+ float result = kScale * b_;
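+    // Advance the state with a simple xor/add mix; deterministic and fast,
+    // but not a high-quality generator.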
+ a_ ^= b_;
+ b_ += a_;
+ return result;
+ }
+
+ // Normal Distribution.
+ int Gaussian(int mean, int standard_deviation) {
+ // Creating a Normal distribution variable from two independent uniform
+ // variables based on the Box-Muller transform, which is defined on the
+ // interval (0, 1], hence the mask+add below.
+ const double kPi = 3.14159265358979323846;
+ const double kScale = 1.0 / 0x80000000ul;
+ double u1 = kScale * ((a_ & 0x7ffffffful) + 1);
+ double u2 = kScale * ((b_ & 0x7ffffffful) + 1);
+ a_ ^= b_;
+ b_ += a_;
+ return static_cast<int>(mean + standard_deviation *
+ std::sqrt(-2 * std::log(u1)) * std::cos(2 * kPi * u2));
+ }
+
+ private:
+ uint32_t a_;
+ uint32_t b_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Random);
+};
+
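+// Accumulates data points and lazily computes mean, variance, standard
+// deviation, min and max. Results are cached until new data is pushed.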
+template<typename T> class Stats {
+ public:
+ Stats()
+ : data_(),
+ last_mean_count_(0),
+ last_variance_count_(0),
+ last_minmax_count_(0),
+ mean_(0),
+ variance_(0),
+ min_(0),
+ max_(0) {
+ }
+
+ void Push(T data_point) {
+ data_.push_back(data_point);
+ }
+
+ T GetMean() {
+ if (last_mean_count_ != data_.size()) {
+ last_mean_count_ = data_.size();
+ mean_ = std::accumulate(data_.begin(), data_.end(), static_cast<T>(0));
+ assert(last_mean_count_ != 0);
+ mean_ /= static_cast<T>(last_mean_count_);
+ }
+ return mean_;
+ }
+ T GetVariance() {
+ if (last_variance_count_ != data_.size()) {
+ last_variance_count_ = data_.size();
+ T mean = GetMean();
+ variance_ = 0;
+ for (typename std::vector<T>::const_iterator it = data_.begin();
+ it != data_.end(); ++it) {
+ T diff = (*it - mean);
+ variance_ += diff * diff;
+ }
+ assert(last_variance_count_ != 0);
+ variance_ /= static_cast<T>(last_variance_count_);
+ }
+ return variance_;
+ }
+ T GetStdDev() {
+ return std::sqrt(static_cast<double>(GetVariance()));
+ }
+ T GetMin() {
+ RefreshMinMax();
+ return min_;
+ }
+ T GetMax() {
+ RefreshMinMax();
+ return max_;
+ }
+
+ void Log(const std::string& units) {
+ printf("%f %s\t+/-%f\t[%f,%f]",
+ GetMean(), units.c_str(), GetStdDev(), GetMin(), GetMax());
+ }
+
+ private:
+ void RefreshMinMax() {
+ if (last_minmax_count_ != data_.size()) {
+ last_minmax_count_ = data_.size();
+ min_ = max_ = 0;
+ if (data_.empty()) {
+ return;
+ }
+ typename std::vector<T>::const_iterator it = data_.begin();
+ min_ = max_ = *it;
+ while (++it != data_.end()) {
+ min_ = std::min(min_, *it);
+ max_ = std::max(max_, *it);
+ }
+ }
+ }
+
+ std::vector<T> data_;
+ typename std::vector<T>::size_type last_mean_count_;
+ typename std::vector<T>::size_type last_variance_count_;
+ typename std::vector<T>::size_type last_minmax_count_;
+ T mean_;
+ T variance_;
+ T min_;
+ T max_;
+};
+
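+// A simulated packet: a send time, a payload size and an RTP header, but no
+// actual payload bytes.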
+class BwePacket {
+ public:
+ BwePacket()
+ : send_time_us_(0),
+ payload_size_(0) {
+ memset(&header_, 0, sizeof(header_));
+ }
+
+ BwePacket(int64_t send_time_us, uint32_t payload_size,
+ const RTPHeader& header)
+ : send_time_us_(send_time_us),
+ payload_size_(payload_size),
+ header_(header) {
+ }
+
+ BwePacket(int64_t send_time_us, uint32_t sequence_number)
+ : send_time_us_(send_time_us),
+ payload_size_(0) {
+ memset(&header_, 0, sizeof(header_));
+ header_.sequenceNumber = sequence_number;
+ }
+
+ bool operator<(const BwePacket& rhs) const {
+ return send_time_us_ < rhs.send_time_us_;
+ }
+
+ void set_send_time_us(int64_t send_time_us) {
+ assert(send_time_us >= 0);
+ send_time_us_ = send_time_us;
+ }
+ int64_t send_time_us() const { return send_time_us_; }
+ uint32_t payload_size() const { return payload_size_; }
+ const RTPHeader& header() const { return header_; }
+
+ private:
+  int64_t send_time_us_;   // Time when the packet left the last processor.
+ uint32_t payload_size_; // Size of the (non-existent, simulated) payload.
+ RTPHeader header_; // Actual contents.
+};
+
+typedef std::list<BwePacket> Packets;
+typedef std::list<BwePacket>::iterator PacketsIt;
+typedef std::list<BwePacket>::const_iterator PacketsConstIt;
+
+bool IsTimeSorted(const Packets& packets);
+
+class PacketProcessorInterface {
+ public:
+ virtual ~PacketProcessorInterface() {}
+
+  // Run the simulation for |time_ms| milliseconds, consuming packets from,
+  // and producing packets into, |in_out|. The outgoing packet list must be
+  // sorted on |send_time_us_|. Implementations may ignore |time_ms|.
+ virtual void RunFor(int64_t time_ms, Packets* in_out) = 0;
+};
+
+class VideoSender : public PacketProcessorInterface {
+ public:
+ VideoSender(float fps, uint32_t kbps, uint32_t ssrc, float first_frame_offset)
+ : kMaxPayloadSizeBytes(1000),
+ kTimestampBase(0xff80ff00ul),
+ frame_period_ms_(1000.0 / fps),
+ next_frame_ms_(frame_period_ms_ * first_frame_offset),
+ now_ms_(0.0),
+ bytes_per_second_(1000 * kbps / 8),
+ frame_size_bytes_(bytes_per_second_ / fps),
+ prototype_header_() {
+ assert(first_frame_offset >= 0.0f);
+ assert(first_frame_offset < 1.0f);
+ memset(&prototype_header_, 0, sizeof(prototype_header_));
+ prototype_header_.ssrc = ssrc;
+ prototype_header_.sequenceNumber = 0xf000u;
+ }
+ virtual ~VideoSender() {}
+
+ uint32_t max_payload_size_bytes() const { return kMaxPayloadSizeBytes; }
+ uint32_t bytes_per_second() const { return bytes_per_second_; }
+
+ virtual void RunFor(int64_t time_ms, Packets* in_out) {
+ assert(in_out);
+ now_ms_ += time_ms;
+ Packets newPackets;
+ while (now_ms_ >= next_frame_ms_) {
+ prototype_header_.sequenceNumber++;
+ prototype_header_.timestamp = kTimestampBase +
+ static_cast<uint32_t>(next_frame_ms_ * 90.0);
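+      // The absolute send time extension is a 24-bit 6.18 fixed-point count
+      // of seconds; (1 << 18) / 1000 converts milliseconds to that format,
+      // with +500 for rounding before the value is masked to 24 bits.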
+ prototype_header_.extension.absoluteSendTime = (kTimestampBase +
+ ((static_cast<int64_t>(next_frame_ms_ * (1 << 18)) + 500) / 1000)) &
+ 0x00fffffful;
+ prototype_header_.extension.transmissionTimeOffset = 0;
+
+ // Generate new packets for this frame, all with the same timestamp,
+ // but the payload size is capped, so if the whole frame doesn't fit in
+ // one packet, we will see a number of equally sized packets followed by
+ // one smaller at the tail.
+ int64_t send_time_us = next_frame_ms_ * 1000.0;
+ uint32_t payload_size = frame_size_bytes_;
+ while (payload_size > 0) {
+ uint32_t size = std::min(kMaxPayloadSizeBytes, payload_size);
+ newPackets.push_back(BwePacket(send_time_us, size, prototype_header_));
+ payload_size -= size;
+ }
+
+ next_frame_ms_ += frame_period_ms_;
+ }
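+    // std::list::merge() requires both lists to be sorted on operator< (send
+    // time here) and keeps the merged list sorted.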
+ in_out->merge(newPackets);
+ }
+
+ private:
+ const uint32_t kMaxPayloadSizeBytes;
+ const uint32_t kTimestampBase;
+ double frame_period_ms_;
+ double next_frame_ms_;
+ double now_ms_;
+ uint32_t bytes_per_second_;
+ uint32_t frame_size_bytes_;
+ RTPHeader prototype_header_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoSender);
+};
+
+class RateCounterFilter : public PacketProcessorInterface {
+ public:
+ RateCounterFilter()
+ : kWindowSizeUs(1000000),
+ packets_per_second_(0),
+ bytes_per_second_(0),
+ last_accumulated_us_(0),
+ window_(),
+ pps_stats_(),
+ kbps_stats_() {
+ }
+ virtual ~RateCounterFilter() {
+ LogStats();
+ }
+
+ uint32_t packets_per_second() const { return packets_per_second_; }
+ uint32_t bits_per_second() const { return bytes_per_second_ * 8; }
+
+ void LogStats() {
+ printf("RateCounterFilter ");
+ pps_stats_.Log("pps");
+ printf("\n");
+ printf("RateCounterFilter ");
+ kbps_stats_.Log("kbps");
+ printf("\n");
+ }
+
+ virtual void RunFor(int64_t /*time_ms*/, Packets* in_out) {
+ assert(in_out);
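+    // Count every incoming packet, then slide a one-second (|kWindowSizeUs|)
+    // window over the stream: packets older than the window, relative to the
+    // newest arrival, are removed from the running packet/byte counters.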
+ for (PacketsConstIt it = in_out->begin(); it != in_out->end(); ++it) {
+ packets_per_second_++;
+ bytes_per_second_ += it->payload_size();
+ last_accumulated_us_ = it->send_time_us();
+ }
+ window_.insert(window_.end(), in_out->begin(), in_out->end());
+ while (!window_.empty()) {
+ const BwePacket& packet = window_.front();
+ if (packet.send_time_us() > (last_accumulated_us_ - kWindowSizeUs)) {
+ break;
+ }
+ assert(packets_per_second_ >= 1);
+ assert(bytes_per_second_ >= packet.payload_size());
+ packets_per_second_--;
+ bytes_per_second_ -= packet.payload_size();
+ window_.pop_front();
+ }
+ pps_stats_.Push(packets_per_second_);
+ kbps_stats_.Push((bytes_per_second_ * 8) / 1000.0);
+ }
+
+ private:
+ const int64_t kWindowSizeUs;
+ uint32_t packets_per_second_;
+ uint32_t bytes_per_second_;
+ int64_t last_accumulated_us_;
+ Packets window_;
+ Stats<double> pps_stats_;
+ Stats<double> kbps_stats_;
+
+ DISALLOW_COPY_AND_ASSIGN(RateCounterFilter);
+};
+
+class LossFilter : public PacketProcessorInterface {
+ public:
+ LossFilter() : random_(0x12345678), loss_fraction_(0.0f) {}
+ virtual ~LossFilter() {}
+
+ void SetLoss(float loss_percent) {
+ assert(loss_percent >= 0.0f);
+ assert(loss_percent <= 100.0f);
+ loss_fraction_ = loss_percent * 0.01f;
+ }
+
+ virtual void RunFor(int64_t /*time_ms*/, Packets* in_out) {
+ assert(in_out);
+ for (PacketsIt it = in_out->begin(); it != in_out->end(); ) {
+ if (random_.Rand() < loss_fraction_) {
+ it = in_out->erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+
+ private:
+ Random random_;
+ float loss_fraction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LossFilter);
+};
+
+class DelayFilter : public PacketProcessorInterface {
+ public:
+ DelayFilter() : delay_us_(0), last_send_time_us_(0) {}
+ virtual ~DelayFilter() {}
+
+ void SetDelay(int64_t delay_ms) {
+ assert(delay_ms >= 0);
+ delay_us_ = delay_ms * 1000;
+ }
+
+ virtual void RunFor(int64_t /*time_ms*/, Packets* in_out) {
+ assert(in_out);
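+    // Clamp each new send time to |last_send_time_us_| so the output stays
+    // time-sorted even if the configured delay is decreased between calls.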
+ for (PacketsIt it = in_out->begin(); it != in_out->end(); ++it) {
+ int64_t new_send_time_us = it->send_time_us() + delay_us_;
+ last_send_time_us_ = std::max(last_send_time_us_, new_send_time_us);
+ it->set_send_time_us(last_send_time_us_);
+ }
+ }
+
+ private:
+ int64_t delay_us_;
+ int64_t last_send_time_us_;
+
+ DISALLOW_COPY_AND_ASSIGN(DelayFilter);
+};
+
+class JitterFilter : public PacketProcessorInterface {
+ public:
+ JitterFilter()
+ : random_(0x89674523),
+ stddev_jitter_us_(0),
+ last_send_time_us_(0) {
+ }
+ virtual ~JitterFilter() {}
+
+ void SetJitter(int64_t stddev_jitter_ms) {
+ assert(stddev_jitter_ms >= 0);
+ stddev_jitter_us_ = stddev_jitter_ms * 1000;
+ }
+
+ virtual void RunFor(int64_t /*time_ms*/, Packets* in_out) {
+ assert(in_out);
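+    // Add Gaussian jitter to each send time but clamp to the previous output
+    // time, so the filter delays packets without ever reordering them.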
+ for (PacketsIt it = in_out->begin(); it != in_out->end(); ++it) {
+ int64_t new_send_time_us = it->send_time_us();
+ new_send_time_us += random_.Gaussian(0, stddev_jitter_us_);
+ last_send_time_us_ = std::max(last_send_time_us_, new_send_time_us);
+ it->set_send_time_us(last_send_time_us_);
+ }
+ }
+
+ private:
+ Random random_;
+ int64_t stddev_jitter_us_;
+ int64_t last_send_time_us_;
+
+ DISALLOW_COPY_AND_ASSIGN(JitterFilter);
+};
+
+class ReorderFilter : public PacketProcessorInterface {
+ public:
+ ReorderFilter() : random_(0x27452389), reorder_fraction_(0.0f) {}
+ virtual ~ReorderFilter() {}
+
+ void SetReorder(float reorder_percent) {
+ assert(reorder_percent >= 0.0f);
+ assert(reorder_percent <= 100.0f);
+ reorder_fraction_ = reorder_percent * 0.01f;
+ }
+
+ virtual void RunFor(int64_t /*time_ms*/, Packets* in_out) {
+ assert(in_out);
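+    // Swap the contents of adjacent packets while keeping their send times in
+    // place; sequence numbers get reordered but the list stays time-sorted.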
+ if (in_out->size() >= 2) {
+ PacketsIt last_it = in_out->begin();
+ PacketsIt it = last_it;
+ while (++it != in_out->end()) {
+ if (random_.Rand() < reorder_fraction_) {
+ int64_t t1 = last_it->send_time_us();
+ int64_t t2 = it->send_time_us();
+ std::swap(*last_it, *it);
+ last_it->set_send_time_us(t1);
+ it->set_send_time_us(t2);
+ }
+ last_it = it;
+ }
+ }
+ }
+
+ private:
+ Random random_;
+ float reorder_fraction_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReorderFilter);
+};
+
+// Apply a bitrate choke with an infinite queue on the packet stream.
+class ChokeFilter : public PacketProcessorInterface {
+ public:
+ ChokeFilter() : kbps_(1200), last_send_time_us_(0) {}
+ virtual ~ChokeFilter() {}
+
+ void SetCapacity(uint32_t kbps) {
+ kbps_ = kbps;
+ }
+
+ virtual void RunFor(int64_t /*time_ms*/, Packets* in_out) {
+ assert(in_out);
+ for (PacketsIt it = in_out->begin(); it != in_out->end(); ++it) {
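+      // Serialization time at |kbps_| is payload_size * 8 * 1000 / kbps_
+      // microseconds; the +kbps_ / 2 term rounds to the nearest microsecond.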
+ int64_t earliest_send_time_us = last_send_time_us_ +
+ (it->payload_size() * 8 * 1000 + kbps_ / 2) / kbps_;
+ last_send_time_us_ = std::max(it->send_time_us(), earliest_send_time_us);
+ it->set_send_time_us(last_send_time_us_);
+ }
+ }
+
+ private:
+ uint32_t kbps_;
+ int64_t last_send_time_us_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChokeFilter);
+};
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_BWE_TEST_FRAMEWORK_H_
diff --git a/modules/remote_bitrate_estimator/bwe_test_framework_unittest.cc b/modules/remote_bitrate_estimator/bwe_test_framework_unittest.cc
new file mode 100644
index 0000000..0d1e7c1
--- /dev/null
+++ b/modules/remote_bitrate_estimator/bwe_test_framework_unittest.cc
@@ -0,0 +1,814 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/remote_bitrate_estimator/bwe_test_framework.h"
+
+#include <numeric>
+
+#include "gtest/gtest.h"
+#include "webrtc/system_wrappers/interface/constructor_magic.h"
+
+using std::vector;
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+TEST(BweTestFramework_RandomTest, Gaussian) {
+ enum {
+ kN = 100000,
+ kBuckets = 100,
+ kMean = 49,
+ kStddev = 10
+ };
+
+ Random random(0x12345678);
+
+ int buckets[kBuckets] = {0};
+ for (int i = 0; i < kN; ++i) {
+ int index = random.Gaussian(kMean, kStddev);
+ if (index >= 0 && index < kBuckets) {
+ buckets[index]++;
+ }
+ }
+
+ const double kPi = 3.14159265358979323846;
+ const double kScale = kN / (kStddev * std::sqrt(2.0 * kPi));
+ const double kDiv = -2.0 * kStddev * kStddev;
+ double self_corr = 0.0;
+ double bucket_corr = 0.0;
+ for (int n = 0; n < kBuckets; ++n) {
+ double normal_dist = kScale * std::exp((n - kMean) * (n - kMean) / kDiv);
+ self_corr += normal_dist * normal_dist;
+ bucket_corr += normal_dist * buckets[n];
+ }
+ printf("Correlation: %f (random sample), %f (self), %f (quotient)\n",
+ bucket_corr, self_corr, bucket_corr / self_corr);
+ EXPECT_NEAR(1.0, bucket_corr / self_corr, 0.0004);
+}
+
+static bool IsSequenceNumberSorted(const Packets& packets) {
+ PacketsConstIt last_it = packets.begin();
+ for (PacketsConstIt it = last_it; it != packets.end(); ++it) {
+ if (IsNewerSequenceNumber(last_it->header().sequenceNumber,
+ it->header().sequenceNumber)) {
+ return false;
+ }
+ last_it = it;
+ }
+ return true;
+}
+
+TEST(BweTestFramework_BwePacketTest, IsTimeSorted) {
+ Packets packets;
+ // Insert some packets in order...
+ EXPECT_TRUE(IsTimeSorted(packets));
+
+ packets.push_back(BwePacket(100, 0));
+ EXPECT_TRUE(IsTimeSorted(packets));
+
+ packets.push_back(BwePacket(110, 0));
+ EXPECT_TRUE(IsTimeSorted(packets));
+
+ // ...and one out-of-order...
+ packets.push_back(BwePacket(100, 0));
+ EXPECT_FALSE(IsTimeSorted(packets));
+
+ // ...remove the out-of-order packet, insert another in-order packet.
+ packets.pop_back();
+ packets.push_back(BwePacket(120, 0));
+ EXPECT_TRUE(IsTimeSorted(packets));
+}
+
+TEST(BweTestFramework_BwePacketTest, IsSequenceNumberSorted) {
+ Packets packets;
+ // Insert some packets in order...
+ EXPECT_TRUE(IsSequenceNumberSorted(packets));
+
+ packets.push_back(BwePacket(0, 100));
+ EXPECT_TRUE(IsSequenceNumberSorted(packets));
+
+ packets.push_back(BwePacket(0, 110));
+ EXPECT_TRUE(IsSequenceNumberSorted(packets));
+
+ // ...and one out-of-order...
+ packets.push_back(BwePacket(0, 100));
+ EXPECT_FALSE(IsSequenceNumberSorted(packets));
+
+ // ...remove the out-of-order packet, insert another in-order packet.
+ packets.pop_back();
+ packets.push_back(BwePacket(0, 120));
+ EXPECT_TRUE(IsSequenceNumberSorted(packets));
+}
+
+TEST(BweTestFramework_StatsTest, Mean) {
+ Stats<int32_t> stats;
+ EXPECT_EQ(0, stats.GetMean());
+
+ stats.Push(1);
+ stats.Push(3);
+ EXPECT_EQ(2, stats.GetMean());
+
+ // Integer division rounds (1+3-3)/3 to 0.
+ stats.Push(-3);
+ EXPECT_EQ(0, stats.GetMean());
+}
+
+TEST(BweTestFramework_StatsTest, Variance) {
+ Stats<int32_t> stats;
+ EXPECT_EQ(0, stats.GetVariance());
+
+ // Mean is 2 ; ((1-2)*(1-2)+(3-2)*(3-2))/2 = (1+1)/2 = 1
+ stats.Push(1);
+ stats.Push(3);
+ EXPECT_EQ(1, stats.GetVariance());
+
+ // Integer division rounds 26/3 to 8
+ // Mean is 0 ; (1*1+3*3+(-4)*(-4))/3 = (1+9+16)/3 = 8
+ stats.Push(-4);
+ EXPECT_EQ(8, stats.GetVariance());
+}
+
+TEST(BweTestFramework_StatsTest, StdDev) {
+ Stats<int32_t> stats;
+ EXPECT_EQ(0, stats.GetStdDev());
+
+ // Variance is 1 ; sqrt(1) = 1
+ stats.Push(1);
+ stats.Push(3);
+ EXPECT_EQ(1, stats.GetStdDev());
+
+ // Variance is 8 ; sqrt(8) = 2 with integers.
+ stats.Push(-4);
+ EXPECT_EQ(2, stats.GetStdDev());
+}
+
+TEST(BweTestFramework_StatsTest, MinMax) {
+ Stats<int32_t> stats;
+ EXPECT_EQ(0, stats.GetMin());
+ EXPECT_EQ(0, stats.GetMax());
+
+ stats.Push(1);
+ EXPECT_EQ(1, stats.GetMin());
+ EXPECT_EQ(1, stats.GetMax());
+
+ stats.Push(3);
+ EXPECT_EQ(1, stats.GetMin());
+ EXPECT_EQ(3, stats.GetMax());
+
+ stats.Push(-4);
+ EXPECT_EQ(-4, stats.GetMin());
+ EXPECT_EQ(3, stats.GetMax());
+}
+
+void TestVideoSender(VideoSender* sender, int64_t run_for_ms,
+ uint32_t expected_packets,
+ uint32_t expected_payload_size,
+ uint32_t expected_total_payload_size) {
+ assert(sender);
+ Packets packets;
+ sender->RunFor(run_for_ms, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+ EXPECT_EQ(expected_packets, packets.size());
+ int64_t send_time_us = -1;
+ uint32_t total_payload_size = 0;
+ uint32_t absolute_send_time = 0;
+ uint32_t absolute_send_time_wraps = 0;
+ uint32_t rtp_timestamp = 0;
+ uint32_t rtp_timestamp_wraps = 0;
+ for (PacketsIt it = packets.begin(); it != packets.end(); ++it) {
+ EXPECT_LE(send_time_us, it->send_time_us());
+ send_time_us = it->send_time_us();
+ if (sender->max_payload_size_bytes() != it->payload_size()) {
+ EXPECT_EQ(expected_payload_size, it->payload_size());
+ }
+ total_payload_size += it->payload_size();
+ if (absolute_send_time > it->header().extension.absoluteSendTime) {
+ absolute_send_time_wraps++;
+ }
+ absolute_send_time = it->header().extension.absoluteSendTime;
+ if (rtp_timestamp > it->header().timestamp) {
+ rtp_timestamp_wraps++;
+ }
+ rtp_timestamp = it->header().timestamp;
+ }
+ EXPECT_EQ(expected_total_payload_size, total_payload_size);
+ EXPECT_GE(1u, absolute_send_time_wraps);
+ EXPECT_GE(1u, rtp_timestamp_wraps);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps1Kbps80_1s) {
+ // 1 fps, 80 kbps
+ VideoSender sender(1.0f, 80, 0x1234, 0);
+ EXPECT_EQ(10000u, sender.bytes_per_second());
+  // We're at 1 fps, so all packets should be generated on the first call,
+  // giving 10 packets of 1000 bytes each, 10000 bytes in total.
+ TestVideoSender(&sender, 1, 10, 1000, 10000);
+ // 999ms, should see no output here.
+ TestVideoSender(&sender, 998, 0, 0, 0);
+ // 1999ms, should get data for one more frame.
+ TestVideoSender(&sender, 1000, 10, 1000, 10000);
+ // 2000ms, one more frame.
+ TestVideoSender(&sender, 1, 10, 1000, 10000);
+ // 2999ms, should see nothing.
+ TestVideoSender(&sender, 999, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps1Kbps80_1s_Offset) {
+ // 1 fps, 80 kbps, offset 0.5 of a frame period, ==0.5s in this case.
+ VideoSender sender(1.0f, 80, 0x1234, 0.5f);
+ EXPECT_EQ(10000u, sender.bytes_per_second());
+ // 499ms, no output.
+ TestVideoSender(&sender, 499, 0, 0, 0);
+ // 500ms, first frame (this is the offset we set), 10 packets of 1000 bytes.
+ TestVideoSender(&sender, 1, 10, 1000, 10000);
+ // 1499ms, nothing.
+ TestVideoSender(&sender, 999, 0, 0, 0);
+ // 1999ms, second frame.
+ TestVideoSender(&sender, 500, 10, 1000, 10000);
+ // 2499ms, nothing.
+ TestVideoSender(&sender, 500, 0, 0, 0);
+ // 2500ms, third frame.
+ TestVideoSender(&sender, 1, 10, 1000, 10000);
+ // 3499ms, nothing.
+ TestVideoSender(&sender, 999, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps50Kbps80_11s) {
+ // 50 fps, 80 kbps.
+ VideoSender sender(50.0f, 80, 0x1234, 0);
+ EXPECT_EQ(10000u, sender.bytes_per_second());
+ // 9998ms, should see 500 frames, 200 byte payloads, total 100000 bytes.
+ TestVideoSender(&sender, 9998, 500, 200, 100000);
+ // 9999ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+ // 10000ms, 501st frame as a single packet.
+ TestVideoSender(&sender, 1, 1, 200, 200);
+ // 10998ms, 49 more frames.
+ TestVideoSender(&sender, 998, 49, 200, 9800);
+ // 10999ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps20Kbps120_1s) {
+ // 20 fps, 120 kbps.
+ VideoSender sender(20.0f, 120, 0x1234, 0);
+ EXPECT_EQ(15000u, sender.bytes_per_second());
+ // 498ms, 10 frames with 750 byte payloads, total 7500 bytes.
+ TestVideoSender(&sender, 498, 10, 750, 7500);
+ // 499ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+ // 500ms, one more frame.
+ TestVideoSender(&sender, 1, 1, 750, 750);
+ // 998ms, 9 more frames.
+ TestVideoSender(&sender, 498, 9, 750, 6750);
+ // 999ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps25Kbps820_20s) {
+  // 25 fps, 820 kbps.
+ VideoSender sender(25.0f, 820, 0x1234, 0);
+ EXPECT_EQ(102500u, sender.bytes_per_second());
+ // 9998ms, 250 frames. 820 kbps = 102500 bytes/s, so total should be 1025000.
+ // Each frame is 102500/25=4100 bytes, or 5 packets (4 @1000 bytes, 1 @100),
+ // so packet count should be 5*250=1250 and last packet of each frame has
+ // 100 bytes of payload.
+ TestVideoSender(&sender, 9998, 1250, 100, 1025000);
+ // 9999ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+ // 19998ms, 250 more frames.
+ TestVideoSender(&sender, 9999, 1250, 100, 1025000);
+ // 19999ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+ // 20038ms, one more frame, as described above (25fps == 40ms/frame).
+ TestVideoSender(&sender, 39, 5, 100, 4100);
+ // 20039ms, nothing.
+ TestVideoSender(&sender, 1, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, TestAppendInOrder) {
+ // 1 fps, 80 kbps, 250ms offset.
+ VideoSender sender1(1.0f, 80, 0x1234, 0.25f);
+ EXPECT_EQ(10000u, sender1.bytes_per_second());
+ Packets packets;
+ // Generate some packets, verify they are sorted.
+ sender1.RunFor(999, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+ EXPECT_EQ(10u, packets.size());
+  // Generate more packets; verify they are appended to the end of the list.
+ sender1.RunFor(1000, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+ EXPECT_EQ(20u, packets.size());
+
+  // Another sender, 2 fps, 160 kbps, 150ms offset.
+ VideoSender sender2(2.0f, 160, 0x2234, 0.30f);
+ EXPECT_EQ(20000u, sender2.bytes_per_second());
+ // Generate some packets, verify that they are merged with the packets already
+ // on the list.
+ sender2.RunFor(999, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ EXPECT_EQ(40u, packets.size());
+ // Generate some more.
+ sender2.RunFor(1000, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ EXPECT_EQ(60u, packets.size());
+}
+
+class BweTestFramework_RateCounterFilterTest : public ::testing::Test {
+ public:
+ BweTestFramework_RateCounterFilterTest()
+ : filter_(),
+ now_ms_(0) {
+ }
+ virtual ~BweTestFramework_RateCounterFilterTest() {}
+
+ protected:
+ void TestRateCounter(int64_t run_for_ms, uint32_t payload_bits,
+ uint32_t expected_pps, uint32_t expected_bps) {
+ Packets packets;
+ RTPHeader header = {0};
+ // "Send" a packet every 10 ms.
+ for (int64_t i = 0; i < run_for_ms; i += 10, now_ms_ += 10) {
+ packets.push_back(BwePacket(now_ms_ * 1000, payload_bits / 8, header));
+ }
+ filter_.RunFor(run_for_ms, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ EXPECT_EQ(expected_pps, filter_.packets_per_second());
+ EXPECT_EQ(expected_bps, filter_.bits_per_second());
+ }
+
+ private:
+ RateCounterFilter filter_;
+ int64_t now_ms_;
+
+ DISALLOW_COPY_AND_ASSIGN(BweTestFramework_RateCounterFilterTest);
+};
+
+TEST_F(BweTestFramework_RateCounterFilterTest, Short) {
+ // 100ms, 100 bytes per packet, should result in 10 pps and 8 kbps. We're
+ // generating one packet every 10 ms ; 10 * 800 = 8k
+ TestRateCounter(100, 800, 10, 8000);
+}
+
+TEST_F(BweTestFramework_RateCounterFilterTest, Medium) {
+ // 100ms, like above.
+ TestRateCounter(100, 800, 10, 8000);
+  // 1000ms, 100 bytes per packet, should result in 100 pps and 80 kbps.
+  // We're still generating packets every 10 ms.
+ TestRateCounter(900, 800, 100, 80000);
+}
+
+TEST_F(BweTestFramework_RateCounterFilterTest, Long) {
+ // 100ms, 1000ms, like above.
+ TestRateCounter(100, 800, 10, 8000);
+ TestRateCounter(900, 800, 100, 80000);
+ // 2000ms, should only see rate of last second, so 100 pps, and 40 kbps now.
+ TestRateCounter(1000, 400, 100, 40000);
+ // 2500ms, half a second with zero payload size. We should get same pps as
+ // before, but kbps should drop to half of previous rate.
+ TestRateCounter(500, 0, 100, 20000);
+ // Another half second with zero payload size. Now the kbps rate should drop
+ // to zero.
+ TestRateCounter(500, 0, 100, 0);
+  // Increase the payload size again. 200 * 100 * 0.5 = 10 kbps.
+ TestRateCounter(500, 200, 100, 10000);
+}
+
+static void TestLossFilter(float loss_percent, bool zero_tolerance) {
+ LossFilter filter;
+ filter.SetLoss(loss_percent);
+ Packets::size_type sent_packets = 0;
+ Packets::size_type remaining_packets = 0;
+
+ // No input should yield no output
+ {
+ Packets packets;
+ sent_packets += packets.size();
+ filter.RunFor(0, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+ remaining_packets += packets.size();
+ EXPECT_EQ(0u, sent_packets);
+ EXPECT_EQ(0u, remaining_packets);
+ }
+
+ // Generate and process 10000 packets in different batch sizes (some empty)
+ for (int i = 0; i < 2225; ++i) {
+ Packets packets;
+ packets.insert(packets.end(), i % 10, BwePacket());
+ sent_packets += packets.size();
+ filter.RunFor(0, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+ remaining_packets += packets.size();
+ }
+
+ float loss_fraction = 0.01f * (100.0f - loss_percent);
+ Packets::size_type expected_packets = loss_fraction * sent_packets;
+ if (zero_tolerance) {
+ EXPECT_EQ(expected_packets, remaining_packets);
+ } else {
+ // Require within 1% of expected
+ EXPECT_NEAR(expected_packets, remaining_packets, 100);
+ }
+}
+
+TEST(BweTestFramework_LossFilterTest, Loss0) {
+ // With 0% loss, the result should be exact (no loss).
+ TestLossFilter(0.0f, true);
+}
+
+TEST(BweTestFramework_LossFilterTest, Loss10) {
+ TestLossFilter(10.0f, false);
+}
+
+TEST(BweTestFramework_LossFilterTest, Loss50) {
+ TestLossFilter(50.0f, false);
+}
+
+TEST(BweTestFramework_LossFilterTest, Loss100) {
+ // With 100% loss, the result should be exact (no packets out).
+ TestLossFilter(100.0f, true);
+}
+
+class BweTestFramework_DelayFilterTest : public ::testing::Test {
+ public:
+ BweTestFramework_DelayFilterTest()
+ : filter_(),
+ now_ms_(0),
+ sequence_number_(0) {
+ }
+ virtual ~BweTestFramework_DelayFilterTest() {}
+
+ protected:
+ void TestDelayFilter(int64_t run_for_ms, uint32_t in_packets,
+ uint32_t out_packets) {
+ Packets packets;
+ for (uint32_t i = 0; i < in_packets; ++i) {
+ packets.push_back(BwePacket(now_ms_ * 1000 + (sequence_number_ >> 4),
+ sequence_number_));
+ sequence_number_++;
+ }
+ filter_.RunFor(run_for_ms, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+ for (PacketsConstIt it = packets.begin(); it != packets.end(); ++it) {
+ EXPECT_LE(now_ms_ * 1000, it->send_time_us());
+ }
+ EXPECT_EQ(out_packets, packets.size());
+ accumulated_packets_.splice(accumulated_packets_.end(), packets);
+ now_ms_ += run_for_ms;
+ }
+
+ void TestDelayFilter(int64_t delay_ms) {
+ filter_.SetDelay(delay_ms);
+ TestDelayFilter(1, 0, 0); // No input should yield no output
+
+ // Single packet
+ TestDelayFilter(0, 1, 1);
+ TestDelayFilter(delay_ms, 0, 0);
+
+ for (int i = 0; i < delay_ms; ++i) {
+ filter_.SetDelay(i);
+ TestDelayFilter(1, 10, 10);
+ }
+ TestDelayFilter(0, 0, 0);
+ TestDelayFilter(delay_ms, 0, 0);
+
+ // Wait a little longer - should still see no output
+ TestDelayFilter(delay_ms, 0, 0);
+
+ for (int i = 1; i < delay_ms + 1; ++i) {
+ filter_.SetDelay(i);
+ TestDelayFilter(1, 5, 5);
+ }
+ TestDelayFilter(0, 0, 0);
+ filter_.SetDelay(2 * delay_ms);
+ TestDelayFilter(1, 0, 0);
+ TestDelayFilter(delay_ms, 13, 13);
+ TestDelayFilter(delay_ms, 0, 0);
+
+ // Wait a little longer - should still see no output
+ TestDelayFilter(delay_ms, 0, 0);
+
+ for (int i = 0; i < 2 * delay_ms; ++i) {
+ filter_.SetDelay(2 * delay_ms - i - 1);
+ TestDelayFilter(1, 5, 5);
+ }
+ TestDelayFilter(0, 0, 0);
+ filter_.SetDelay(0);
+ TestDelayFilter(0, 7, 7);
+
+ ASSERT_TRUE(IsTimeSorted(accumulated_packets_));
+ ASSERT_TRUE(IsSequenceNumberSorted(accumulated_packets_));
+ }
+
+ DelayFilter filter_;
+ Packets accumulated_packets_;
+
+ private:
+ int64_t now_ms_;
+ uint32_t sequence_number_;
+
+ DISALLOW_COPY_AND_ASSIGN(BweTestFramework_DelayFilterTest);
+};
+
+TEST_F(BweTestFramework_DelayFilterTest, Delay0) {
+ TestDelayFilter(1, 0, 0); // No input should yield no output
+ TestDelayFilter(1, 10, 10); // Expect no delay (delay time is zero)
+ TestDelayFilter(1, 0, 0); // Check no packets are still in buffer
+ filter_.SetDelay(0);
+ TestDelayFilter(1, 5, 5); // Expect no delay (delay time is zero)
+ TestDelayFilter(1, 0, 0); // Check no packets are still in buffer
+}
+
+TEST_F(BweTestFramework_DelayFilterTest, Delay1) {
+ TestDelayFilter(1);
+}
+
+TEST_F(BweTestFramework_DelayFilterTest, Delay2) {
+ TestDelayFilter(2);
+}
+
+TEST_F(BweTestFramework_DelayFilterTest, Delay20) {
+ TestDelayFilter(20);
+}
+
+TEST_F(BweTestFramework_DelayFilterTest, Delay100) {
+ TestDelayFilter(100);
+}
+
+TEST_F(BweTestFramework_DelayFilterTest, JumpToZeroDelay) {
+ DelayFilter delay;
+ Packets acc;
+ Packets packets;
+
+ // Delay a bunch of packets, accumulate them to the 'acc' list.
+ delay.SetDelay(100.0f);
+ for (uint32_t i = 0; i < 10; ++i) {
+ packets.push_back(BwePacket(i * 100, i));
+ }
+ delay.RunFor(1000, &packets);
+ acc.splice(acc.end(), packets);
+ ASSERT_TRUE(IsTimeSorted(acc));
+ ASSERT_TRUE(IsSequenceNumberSorted(acc));
+
+ // Drop delay to zero, send a few more packets through the delay, append them
+ // to the 'acc' list and verify that it is all sorted.
+ delay.SetDelay(0.0f);
+ for (uint32_t i = 10; i < 50; ++i) {
+ packets.push_back(BwePacket(i * 100, i));
+ }
+ delay.RunFor(1000, &packets);
+ acc.splice(acc.end(), packets);
+ ASSERT_TRUE(IsTimeSorted(acc));
+ ASSERT_TRUE(IsSequenceNumberSorted(acc));
+}
+
+TEST_F(BweTestFramework_DelayFilterTest, IncreasingDelay) {
+ // Gradually increase delay.
+ for (int i = 1; i < 50; i += 4) {
+ TestDelayFilter(i);
+ }
+ // Reach a steady state.
+ filter_.SetDelay(100);
+ TestDelayFilter(1, 20, 20);
+ TestDelayFilter(2, 0, 0);
+ TestDelayFilter(99, 20, 20);
+ // Drop delay back down to zero.
+ filter_.SetDelay(0);
+ TestDelayFilter(1, 100, 100);
+ TestDelayFilter(23010, 0, 0);
+ ASSERT_TRUE(IsTimeSorted(accumulated_packets_));
+ ASSERT_TRUE(IsSequenceNumberSorted(accumulated_packets_));
+}
+
+static void TestJitterFilter(int64_t stddev_jitter_ms) {
+ JitterFilter filter;
+ filter.SetJitter(stddev_jitter_ms);
+
+ int64_t now_ms = 0;
+ uint32_t sequence_number = 0;
+
+ // Generate packets, add jitter to them, accumulate the altered packets.
+ Packets original;
+ Packets jittered;
+ for (uint32_t i = 0; i < 1000; ++i) {
+ Packets packets;
+ for (uint32_t j = 0; j < i % 100; ++j) {
+ packets.push_back(BwePacket(now_ms * 1000, sequence_number++));
+ now_ms += 5 * stddev_jitter_ms;
+ }
+ original.insert(original.end(), packets.begin(), packets.end());
+ filter.RunFor(stddev_jitter_ms, &packets);
+ jittered.splice(jittered.end(), packets);
+ }
+
+ // Jittered packets should still be in order.
+ ASSERT_TRUE(IsTimeSorted(original));
+ ASSERT_TRUE(IsTimeSorted(jittered));
+ ASSERT_TRUE(IsSequenceNumberSorted(original));
+ ASSERT_TRUE(IsSequenceNumberSorted(jittered));
+ EXPECT_EQ(original.size(), jittered.size());
+
+ // Make sure jittered and original packets are in same order. Collect time
+ // difference (jitter) in stats, then check that mean jitter is close to zero
+ // and standard deviation of jitter is what we set it to.
+ Stats<double> jitter_us;
+ for (PacketsIt it1 = original.begin(), it2 = jittered.begin();
+ it1 != original.end() && it2 != jittered.end(); ++it1, ++it2) {
+ EXPECT_EQ(it1->header().sequenceNumber, it2->header().sequenceNumber);
+ jitter_us.Push(it2->send_time_us() - it1->send_time_us());
+ }
+ EXPECT_NEAR(0.0, jitter_us.GetMean(), stddev_jitter_ms * 1000.0 * 0.008);
+ EXPECT_NEAR(stddev_jitter_ms * 1000.0, jitter_us.GetStdDev(),
+ stddev_jitter_ms * 1000.0 * 0.02);
+}
+
+TEST(BweTestFramework_JitterFilterTest, Jitter0) {
+ TestJitterFilter(0);
+}
+
+TEST(BweTestFramework_JitterFilterTest, Jitter1) {
+ TestJitterFilter(1);
+}
+
+TEST(BweTestFramework_JitterFilterTest, Jitter5) {
+ TestJitterFilter(5);
+}
+
+TEST(BweTestFramework_JitterFilterTest, Jitter10) {
+ TestJitterFilter(10);
+}
+
+TEST(BweTestFramework_JitterFilterTest, Jitter1031) {
+ TestJitterFilter(1031);
+}
+
+static void TestReorderFilter(uint32_t reorder_percent, uint32_t near) {
+ const uint32_t kPacketCount = 10000;
+
+ // Generate packets with 10 ms interval.
+ Packets packets;
+ int64_t now_ms = 0;
+ uint32_t sequence_number = 1;
+ for (uint32_t i = 0; i < kPacketCount; ++i, now_ms += 10) {
+ packets.push_back(BwePacket(now_ms * 1000, sequence_number++));
+ }
+ ASSERT_TRUE(IsTimeSorted(packets));
+ ASSERT_TRUE(IsSequenceNumberSorted(packets));
+
+ // Reorder packets, verify that send times are still in order.
+ ReorderFilter filter;
+ filter.SetReorder(reorder_percent);
+ filter.RunFor(now_ms, &packets);
+ ASSERT_TRUE(IsTimeSorted(packets));
+
+ // We measure the amount of reordering by summing the distance by which out-
+ // of-order packets have been moved in the stream.
+ uint32_t distance = 0;
+ uint32_t last_sequence_number = 0;
+ for (PacketsIt it = packets.begin(); it != packets.end(); ++it) {
+ uint32_t sequence_number = it->header().sequenceNumber;
+ if (sequence_number < last_sequence_number) {
+ distance += last_sequence_number - sequence_number;
+ }
+ last_sequence_number = sequence_number;
+ }
+
+ // Because reordering is random, we allow a threshold when comparing. The
+ // maximum distance a packet can be moved is PacketCount - 1.
+ EXPECT_NEAR(((kPacketCount - 1) * reorder_percent) / 100, distance, near);
+}
+
+TEST(BweTestFramework_ReorderFilterTest, Reorder0) {
+ // For 0% reordering, no packets should have been moved, so result is exact.
+ TestReorderFilter(0, 0);
+}
+
+TEST(BweTestFramework_ReorderFilterTest, Reorder10) {
+ TestReorderFilter(10, 30);
+}
+
+TEST(BweTestFramework_ReorderFilterTest, Reorder20) {
+ TestReorderFilter(20, 20);
+}
+
+TEST(BweTestFramework_ReorderFilterTest, Reorder50) {
+ TestReorderFilter(50, 20);
+}
+
+TEST(BweTestFramework_ReorderFilterTest, Reorder70) {
+ TestReorderFilter(70, 20);
+}
+
+TEST(BweTestFramework_ReorderFilterTest, Reorder100) {
+ // Note that because the implementation works by optionally swapping two
+ // adjacent packets, when the likelihood of a swap is 1.0, a swap will always
+ // occur, so the stream will be in order except for the first packet, which
+ // has been moved to the end. Therefore we expect the result to be exact here.
+  TestReorderFilter(100, 0);
+}
+
+class BweTestFramework_ChokeFilterTest : public ::testing::Test {
+ public:
+ BweTestFramework_ChokeFilterTest()
+ : filter_(),
+ now_ms_(0),
+ sequence_number_(0),
+ output_packets_() {
+ }
+ virtual ~BweTestFramework_ChokeFilterTest() {}
+
+ protected:
+ void TestChoke(int64_t run_for_ms, uint32_t packets_to_generate,
+ uint32_t choke_kbps, uint32_t expected_kbit_transmitted) {
+ // Generate a bunch of packets, apply choke, verify output is ordered.
+ Packets packets;
+ RTPHeader header = {0};
+ for (uint32_t i = 0; i < packets_to_generate; ++i) {
+ int64_t send_time_ms = now_ms_ + (i * run_for_ms) / packets_to_generate;
+ header.sequenceNumber = sequence_number_++;
+ // Payload is 1000 bits.
+ packets.push_back(BwePacket(send_time_ms * 1000, 125, header));
+ }
+ ASSERT_TRUE(IsTimeSorted(packets));
+ filter_.SetCapacity(choke_kbps);
+ filter_.RunFor(run_for_ms, &packets);
+ now_ms_ += run_for_ms;
+ output_packets_.splice(output_packets_.end(), packets);
+ ASSERT_TRUE(IsTimeSorted(output_packets_));
+ ASSERT_TRUE(IsSequenceNumberSorted(output_packets_));
+
+ // Sum up the transmitted bytes up until the current time.
+ uint32_t bytes_transmitted = 0;
+ while (!output_packets_.empty()) {
+ const BwePacket& packet = output_packets_.front();
+ if (packet.send_time_us() > now_ms_ * 1000) {
+ break;
+ }
+ bytes_transmitted += packet.payload_size();
+ output_packets_.pop_front();
+ }
+ EXPECT_EQ(expected_kbit_transmitted, (bytes_transmitted * 8) / 1000);
+ }
+
+ private:
+ ChokeFilter filter_;
+ int64_t now_ms_;
+ uint32_t sequence_number_;
+ Packets output_packets_;
+
+ DISALLOW_COPY_AND_ASSIGN(BweTestFramework_ChokeFilterTest);
+};
+
+TEST_F(BweTestFramework_ChokeFilterTest, Short) {
+ // 100ms, 100 packets, 10 kbps choke -> 1 kbit of data should have propagated.
+ // That is actually just a single packet, since each packet has 1000 bits of
+ // payload.
+ TestChoke(100, 100, 10, 1);
+}
+
+TEST_F(BweTestFramework_ChokeFilterTest, Medium) {
+ // 100ms, 10 packets, 10 kbps choke -> 1 packet through, or 1 kbit.
+ TestChoke(100, 10, 10, 1);
+ // 200ms, no new packets, same choke -> another packet through.
+ TestChoke(100, 0, 10, 1);
+ // 1000ms, no new packets, same choke -> 8 more packets.
+ TestChoke(800, 0, 10, 8);
+ // 2000ms, no new packets, same choke -> queue is empty so no output.
+ TestChoke(1000, 0, 10, 0);
+}
+
+TEST_F(BweTestFramework_ChokeFilterTest, Long) {
+ // 100ms, 100 packets in queue, 10 kbps choke -> 1 packet through, or 1 kbit.
+ TestChoke(100, 100, 10, 1);
+ // 200ms, no input, another packet through.
+ TestChoke(100, 0, 10, 1);
+ // 1000ms, no input, 8 packets through.
+ TestChoke(800, 0, 10, 8);
+  // 10000ms, no input, raise choke to 100 kbps. Remaining 90 packets in the
+  // queue should be propagated, for a total of 90 kbit.
+ TestChoke(9000, 0, 100, 90);
+ // 10100ms, 20 more packets, 100 kbps choke -> 10 packets or 10 kbit through.
+ TestChoke(100, 20, 100, 10);
+ // 10300ms, 10 more packets, same choke -> 20 packets out.
+ TestChoke(200, 10, 100, 20);
+ // 11300ms, no input, queue should be empty.
+ TestChoke(1000, 0, 10, 0);
+}
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
diff --git a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
index a2f6bcf..5bf8af9 100644
--- a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
+++ b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
@@ -72,7 +72,8 @@
Clock* clock) const;
};
-struct AbsoluteSendTimeRemoteBitrateEstimatorFactory {
+struct AbsoluteSendTimeRemoteBitrateEstimatorFactory
+ : public RemoteBitrateEstimatorFactory {
AbsoluteSendTimeRemoteBitrateEstimatorFactory() {}
virtual ~AbsoluteSendTimeRemoteBitrateEstimatorFactory() {}
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
new file mode 100644
index 0000000..29c9ab1
--- /dev/null
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "gtest/gtest.h"
+#include "webrtc/modules/remote_bitrate_estimator/bwe_test_framework.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/constructor_magic.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+#define ENABLE_1_SENDER 1
+#define ENABLE_3_SENDERS 1
+#define ENABLE_10_SENDERS 1
+#define ENABLE_BASIC_TESTS 1
+#define ENABLE_LOSS_TESTS 0
+#define ENABLE_DELAY_TESTS 0
+#define ENABLE_JITTER_TESTS 0
+#define ENABLE_REORDER_TESTS 0
+#define ENABLE_CHOKE_TESTS 0
+#define ENABLE_MULTI_TESTS 0
+
+#define ENABLE_TOF_ESTIMATOR 1
+#define ENABLE_AST_ESTIMATOR 1
+
+using std::vector;
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+const int64_t kSimulationIntervalMs = 1000;
+
+namespace stl_helpers {
+template<typename T> void DeleteElements(T* container) {
+ if (!container) return;
+ for (typename T::iterator it = container->begin(); it != container->end();
+ ++it) {
+ delete *it;
+ }
+ container->clear();
+}
+} // namespace stl_helpers
+
+class TestedEstimator : public RemoteBitrateObserver {
+ public:
+ TestedEstimator(const std::string& debug_name,
+ const RemoteBitrateEstimatorFactory& factory)
+ : debug_name_(debug_name),
+ clock_(0),
+ stats_(),
+ relative_estimator_stats_(),
+ latest_estimate_kbps_(-1.0),
+ estimator_(factory.Create(this, &clock_)),
+ relative_estimator_(NULL) {
+ assert(estimator_.get());
+ // Default RTT in RemoteRateControl is 200 ms ; 50 ms is more realistic.
+ estimator_->OnRttUpdate(50);
+ }
+
+ void SetRelativeEstimator(TestedEstimator* relative_estimator) {
+ relative_estimator_ = relative_estimator;
+ }
+
+ void EatPacket(const BwePacket& packet) {
+ latest_estimate_kbps_ = -1.0;
+
+    // We're treating the send time (from the previous filter) as the arrival
+    // time once the packet reaches the estimator.
+ int64_t packet_time_ms = (packet.send_time_us() + 500) / 1000;
+
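+    // Run the estimator's periodic Process() calls that are due before the
+    // packet's arrival time, advancing the simulated clock accordingly.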
+ int64_t step_ms = estimator_->TimeUntilNextProcess();
+ while ((clock_.TimeInMilliseconds() + step_ms) < packet_time_ms) {
+ clock_.AdvanceTimeMilliseconds(step_ms);
+ estimator_->Process();
+ step_ms = estimator_->TimeUntilNextProcess();
+ }
+
+ estimator_->IncomingPacket(packet_time_ms, packet.payload_size(),
+ packet.header());
+ clock_.AdvanceTimeMilliseconds(packet_time_ms -
+ clock_.TimeInMilliseconds());
+ ASSERT_TRUE(packet_time_ms == clock_.TimeInMilliseconds());
+ }
+
+ void CheckEstimate() {
+ double estimated_kbps = 0.0;
+ if (LatestEstimate(&estimated_kbps)) {
+ stats_.Push(estimated_kbps);
+ double relative_estimate_kbps = 0.0;
+ if (relative_estimator_ &&
+ relative_estimator_->LatestEstimate(&relative_estimate_kbps)) {
+ relative_estimator_stats_.Push(estimated_kbps - relative_estimate_kbps);
+ }
+ }
+ }
+
+ void LogStats() {
+ printf("%s Mean ", debug_name_.c_str());
+ stats_.Log("kbps");
+ printf("\n");
+ if (relative_estimator_) {
+ printf("%s Diff ", debug_name_.c_str());
+ relative_estimator_stats_.Log("kbps");
+ printf("\n");
+ }
+ }
+
+ virtual void OnReceiveBitrateChanged(const vector<unsigned int>& ssrcs,
+ unsigned int bitrate) {
+ }
+
+ private:
+ bool LatestEstimate(double* estimate_kbps) {
+ if (latest_estimate_kbps_ < 0.0) {
+ vector<unsigned int> ssrcs;
+ unsigned int bps = 0;
+ if (!estimator_->LatestEstimate(&ssrcs, &bps)) {
+ return false;
+ }
+ latest_estimate_kbps_ = bps / 1000.0;
+ }
+ *estimate_kbps = latest_estimate_kbps_;
+ return true;
+ }
+
+ std::string debug_name_;
+ bool log_estimates_;
+ SimulatedClock clock_;
+ Stats<double> stats_;
+ Stats<double> relative_estimator_stats_;
+ double latest_estimate_kbps_;
+ scoped_ptr<RemoteBitrateEstimator> estimator_;
+ TestedEstimator* relative_estimator_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TestedEstimator);
+};
+
+class RemoteBitrateEstimatorsTest : public ::testing::Test {
+ public:
+ RemoteBitrateEstimatorsTest()
+ : run_time_ms_(0),
+ estimators_(),
+ previous_packets_(),
+ processors_(),
+ video_senders_() {
+ }
+ virtual ~RemoteBitrateEstimatorsTest() {
+ stl_helpers::DeleteElements(&estimators_);
+ stl_helpers::DeleteElements(&video_senders_);
+ }
+
+ virtual void SetUp() {
+#if ENABLE_TOF_ESTIMATOR
+ estimators_.push_back(new TestedEstimator("TOF",
+ RemoteBitrateEstimatorFactory()));
+#endif
+#if ENABLE_AST_ESTIMATOR
+ estimators_.push_back(new TestedEstimator("AST",
+ AbsoluteSendTimeRemoteBitrateEstimatorFactory()));
+#endif
+ // Set all estimators as relative to the first one.
+ for (uint32_t i = 1; i < estimators_.size(); ++i) {
+ estimators_[i]->SetRelativeEstimator(estimators_[0]);
+ }
+ }
+
+ protected:
+ void RunFor(int64_t time_ms) {
+ for (run_time_ms_ += time_ms; run_time_ms_ >= kSimulationIntervalMs;
+ run_time_ms_ -= kSimulationIntervalMs) {
+ Packets packets;
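+      // Push the packet stream through every processor (video senders and
+      // filters) for one simulation interval before feeding the estimators.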
+ for (vector<PacketProcessorInterface*>::const_iterator it =
+ processors_.begin(); it != processors_.end(); ++it) {
+ (*it)->RunFor(kSimulationIntervalMs, &packets);
+ }
+
+ // Verify packets are in order between batches.
+ if (!packets.empty() && !previous_packets_.empty()) {
+ packets.splice(packets.begin(), previous_packets_,
+ --previous_packets_.end());
+ ASSERT_TRUE(IsTimeSorted(packets));
+ packets.erase(packets.begin());
+ } else {
+ ASSERT_TRUE(IsTimeSorted(packets));
+ }
+
+ for (PacketsConstIt pit = packets.begin(); pit != packets.end(); ++pit) {
+ for (vector<TestedEstimator*>::iterator eit = estimators_.begin();
+ eit != estimators_.end(); ++eit) {
+ (*eit)->EatPacket(*pit);
+ }
+ }
+
+ previous_packets_.swap(packets);
+
+ for (vector<TestedEstimator*>::iterator eit = estimators_.begin();
+ eit != estimators_.end(); ++eit) {
+ (*eit)->CheckEstimate();
+ }
+ }
+ }
+
+ void AddVideoSenders(uint32_t count) {
+ struct { float fps; uint32_t kbps; uint32_t ssrc; float frame_offset; }
+ configs[] = {
+ { 30.00f, 150, 0x1234, 0.13f },
+ { 15.00f, 500, 0x2345, 0.16f },
+ { 30.00f, 1200, 0x3456, 0.26f },
+ { 7.49f, 150, 0x4567, 0.05f },
+ { 7.50f, 150, 0x5678, 0.15f },
+ { 7.51f, 150, 0x6789, 0.25f },
+ { 15.02f, 150, 0x7890, 0.27f },
+ { 15.03f, 150, 0x8901, 0.38f },
+ { 30.02f, 150, 0x9012, 0.39f },
+ { 30.03f, 150, 0x0123, 0.52f }
+ };
+ assert(count <= sizeof(configs) / sizeof(configs[0]));
+ uint32_t total_capacity = 0;
+ for (uint32_t i = 0; i < count; ++i) {
+ video_senders_.push_back(new VideoSender(configs[i].fps, configs[i].kbps,
+ configs[i].ssrc, configs[i].frame_offset));
+ processors_.push_back(video_senders_.back());
+ total_capacity += configs[i].kbps;
+ }
+ printf("RequiredLinkCapacity %d kbps\n", total_capacity);
+ }
+
+ void LogStats() {
+ for (vector<TestedEstimator*>::iterator eit = estimators_.begin();
+ eit != estimators_.end(); ++eit) {
+ (*eit)->LogStats();
+ }
+ }
+
+ void UnlimitedSpeedTest() {
+ RunFor(10 * 60 * 1000);
+ }
+
+ void SteadyLossTest() {
+ LossFilter loss;
+ processors_.push_back(&loss);
+ loss.SetLoss(20.0);
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingLoss1Test() {
+ LossFilter loss;
+ processors_.push_back(&loss);
+ for (int i = 0; i < 76; ++i) {
+ loss.SetLoss(i);
+ RunFor(5000);
+ }
+ }
+
+ void SteadyDelayTest() {
+ DelayFilter delay;
+ processors_.push_back(&delay);
+ delay.SetDelay(1000);
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingDelay1Test() {
+ DelayFilter delay;
+ processors_.push_back(&delay);
+ RunFor(10 * 60 * 1000);
+ for (int i = 0; i < 30 * 2; ++i) {
+ delay.SetDelay(i);
+ RunFor(10 * 1000);
+ }
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingDelay2Test() {
+ DelayFilter delay;
+ RateCounterFilter counter;
+ processors_.push_back(&delay);
+ processors_.push_back(&counter);
+ RunFor(1 * 60 * 1000);
+ for (int i = 1; i < 51; ++i) {
+ delay.SetDelay(10.0f * i);
+ RunFor(10 * 1000);
+ }
+ delay.SetDelay(0.0f);
+ RunFor(10 * 60 * 1000);
+ }
+ void JumpyDelay1Test() {
+ DelayFilter delay;
+ processors_.push_back(&delay);
+ RunFor(10 * 60 * 1000);
+ for (int i = 1; i < 200; ++i) {
+ delay.SetDelay((10 * i) % 500);
+ RunFor(1000);
+ delay.SetDelay(1.0f);
+ RunFor(1000);
+ }
+ delay.SetDelay(0.0f);
+ RunFor(10 * 60 * 1000);
+ }
+
+ void SteadyJitterTest() {
+ JitterFilter jitter;
+ RateCounterFilter counter;
+ processors_.push_back(&jitter);
+ processors_.push_back(&counter);
+ jitter.SetJitter(120);
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingJitter1Test() {
+ JitterFilter jitter;
+ processors_.push_back(&jitter);
+ for (int i = 0; i < 2 * 60 * 2; ++i) {
+ jitter.SetJitter(i);
+ RunFor(10 * 1000);
+ }
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingJitter2Test() {
+ JitterFilter jitter;
+ processors_.push_back(&jitter);
+ RunFor(30 * 1000);
+ for (int i = 1; i < 51; ++i) {
+ jitter.SetJitter(10.0f * i);
+ RunFor(10 * 1000);
+ }
+ jitter.SetJitter(0.0f);
+ RunFor(10 * 60 * 1000);
+ }
+
+ void SteadyReorderTest() {
+ ReorderFilter reorder;
+ processors_.push_back(&reorder);
+ reorder.SetReorder(20.0);
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingReorder1Test() {
+ ReorderFilter reorder;
+ processors_.push_back(&reorder);
+ for (int i = 0; i < 76; ++i) {
+ reorder.SetReorder(i);
+ RunFor(5000);
+ }
+ }
+
+ void SteadyChokeTest() {
+ ChokeFilter choke;
+ processors_.push_back(&choke);
+ choke.SetCapacity(140);
+ RunFor(10 * 60 * 1000);
+ }
+ void IncreasingChoke1Test() {
+ ChokeFilter choke;
+ processors_.push_back(&choke);
+ for (int i = 1200; i >= 100; i -= 100) {
+ choke.SetCapacity(i);
+ RunFor(5000);
+ }
+ }
+ void IncreasingChoke2Test() {
+ ChokeFilter choke;
+ processors_.push_back(&choke);
+ RunFor(60 * 1000);
+ for (int i = 1200; i >= 100; i -= 20) {
+ choke.SetCapacity(i);
+ RunFor(1000);
+ }
+ }
+
+ void Multi1Test() {
+ DelayFilter delay;
+ ChokeFilter choke;
+ RateCounterFilter counter;
+ processors_.push_back(&delay);
+ processors_.push_back(&choke);
+ processors_.push_back(&counter);
+ choke.SetCapacity(1000);
+ RunFor(1 * 60 * 1000);
+ for (int i = 1; i < 51; ++i) {
+ delay.SetDelay(100.0f * i);
+ RunFor(10 * 1000);
+ }
+ delay.SetDelay(0.0f);
+ RunFor(5 * 60 * 1000);
+ }
+ void Multi2Test() {
+ ChokeFilter choke;
+ JitterFilter jitter;
+ RateCounterFilter counter;
+ processors_.push_back(&choke);
+ processors_.push_back(&jitter);
+ processors_.push_back(&counter);
+ choke.SetCapacity(2000);
+ jitter.SetJitter(120);
+ RunFor(5 * 60 * 1000);
+ }
+
+ private:
+ int64_t run_time_ms_;
+ vector<TestedEstimator*> estimators_;
+ Packets previous_packets_;
+ vector<PacketProcessorInterface*> processors_;
+ vector<VideoSender*> video_senders_;
+
+ DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorsTest);
+};
+
+#define SINGLE_TEST(enabled, test_name, video_senders)\
+ TEST_F(RemoteBitrateEstimatorsTest, test_name##_##video_senders##Sender) {\
+ if (enabled) {\
+ AddVideoSenders(video_senders);\
+ test_name##Test();\
+ LogStats();\
+ }\
+ }
+
+#define MULTI_TEST(enabled, test_name)\
+ SINGLE_TEST((enabled) && ENABLE_1_SENDER, test_name, 1)\
+ SINGLE_TEST((enabled) && ENABLE_3_SENDERS, test_name, 3)\
+ SINGLE_TEST((enabled) && ENABLE_10_SENDERS, test_name, 10)
+
+MULTI_TEST(ENABLE_BASIC_TESTS, UnlimitedSpeed)
+MULTI_TEST(ENABLE_LOSS_TESTS, SteadyLoss)
+MULTI_TEST(ENABLE_LOSS_TESTS, IncreasingLoss1)
+MULTI_TEST(ENABLE_DELAY_TESTS, SteadyDelay)
+MULTI_TEST(ENABLE_DELAY_TESTS, IncreasingDelay1)
+MULTI_TEST(ENABLE_DELAY_TESTS, IncreasingDelay2)
+MULTI_TEST(ENABLE_DELAY_TESTS, JumpyDelay1)
+MULTI_TEST(ENABLE_JITTER_TESTS, SteadyJitter)
+MULTI_TEST(ENABLE_JITTER_TESTS, IncreasingJitter1)
+MULTI_TEST(ENABLE_JITTER_TESTS, IncreasingJitter2)
+MULTI_TEST(ENABLE_REORDER_TESTS, SteadyReorder)
+MULTI_TEST(ENABLE_REORDER_TESTS, IncreasingReorder1)
+MULTI_TEST(ENABLE_CHOKE_TESTS, SteadyChoke)
+MULTI_TEST(ENABLE_CHOKE_TESTS, IncreasingChoke1)
+MULTI_TEST(ENABLE_CHOKE_TESTS, IncreasingChoke2)
+MULTI_TEST(ENABLE_MULTI_TESTS, Multi1)
+MULTI_TEST(ENABLE_MULTI_TESTS, Multi2)
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
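For readers tracing the token-pasting above, this is roughly what a single MULTI_TEST line expands to; only the 1-sender case is shown, and the 3- and 10-sender variants differ only in the count passed to AddVideoSenders().

  // Sketch of the expansion of MULTI_TEST(ENABLE_DELAY_TESTS, SteadyDelay),
  // single-sender case.
  TEST_F(RemoteBitrateEstimatorsTest, SteadyDelay_1Sender) {
    if ((ENABLE_DELAY_TESTS) && ENABLE_1_SENDER) {
      AddVideoSenders(1);   // Attach one VideoSender to the pipeline.
      SteadyDelayTest();    // Run the scenario defined in the fixture above.
      LogStats();           // Report per-estimator statistics.
    }
  }
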
diff --git a/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc b/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
index bcd1d10..1d8b2be 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_format_vp8_unittest.cc
@@ -8,19 +8,20 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
/*
* This file includes unit tests for the VP8 packetizer.
*/
#include "testing/gtest/include/gtest/gtest.h"
-
-#include "webrtc/system_wrappers/interface/compile_assert.h"
-
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/typedefs.h"
+#define CHECK_ARRAY_SIZE(expected_size, array) \
+ COMPILE_ASSERT(expected_size == sizeof(array) / sizeof(array[0]), \
+ check_array_size);
+
namespace webrtc {
class RtpFormatVp8Test : public ::testing::Test {
@@ -63,10 +64,8 @@
const bool kExpectedFragStart[] =
{true, false, true, true, false, false, false};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -91,10 +90,8 @@
const int kExpectedPart[] = {0, 0, 0, 1};
const bool kExpectedFragStart[] = {true, false, false, true};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -119,10 +116,8 @@
const int kExpectedPart[] = {0, 0, 1, 5};
const bool kExpectedFragStart[] = {true, false, true, true};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -147,10 +142,8 @@
const int kExpectedPart[] = {0, 0, 1, 4, 4, 5};
const bool kExpectedFragStart[] = {true, false, true, true, false, true};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -175,10 +168,8 @@
const int kExpectedPart[] = {0, 0, 1, 1};
const bool kExpectedFragStart[] = {true, false, true, false};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -203,10 +194,8 @@
// Frag start only true for first packet in equal size mode.
const bool kExpectedFragStart[] = {true, false, false, false};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->set_sloppy_partitioning(true);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
@@ -232,10 +221,8 @@
// Frag start only true for first packet in equal size mode.
const bool kExpectedFragStart[] = {true, false};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->set_sloppy_partitioning(true);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
@@ -265,10 +252,8 @@
const int kExpectedPart[1] = {0}; // Packet starts with partition 0.
const bool kExpectedFragStart[1] = {true};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -295,10 +280,8 @@
const int kExpectedPart[1] = {0}; // Packet starts with partition 0.
const bool kExpectedFragStart[1] = {true};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
@@ -326,10 +309,8 @@
const int kExpectedPart[1] = {0}; // Packet starts with partition 0.
const bool kExpectedFragStart[1] = {true};
const int kExpectedNum = sizeof(kExpectedSizes) / sizeof(kExpectedSizes[0]);
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedPart) / sizeof(kExpectedPart[0]));
- COMPILE_ASSERT(kExpectedNum ==
- sizeof(kExpectedFragStart) / sizeof(kExpectedFragStart[0]));
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedPart);
+ CHECK_ARRAY_SIZE(kExpectedNum, kExpectedFragStart);
helper_->GetAllPacketsAndCheck(&packetizer, kExpectedSizes, kExpectedPart,
kExpectedFragStart, kExpectedNum);
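As a quick illustration of the new helper (with hypothetical arrays, not ones taken from the tests above): CHECK_ARRAY_SIZE(n, a) simply forwards to the two-argument COMPILE_ASSERT, so a size mismatch fails at compile time.

  const int kSizes[] = {10, 10, 8};
  const int kParts[] = {0, 1, 1};
  const int kNum = sizeof(kSizes) / sizeof(kSizes[0]);
  CHECK_ARRAY_SIZE(kNum, kParts);  // OK: kParts also has 3 elements.
  // CHECK_ARRAY_SIZE(4, kParts);  // Would not compile; the error message
  //                               // names check_array_size.
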
diff --git a/modules/rtp_rtcp/source/rtp_utility.cc b/modules/rtp_rtcp/source/rtp_utility.cc
index bc0be8b..f50b20a 100644
--- a/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/modules/rtp_rtcp/source/rtp_utility.cc
@@ -125,16 +125,12 @@
}
#endif
-#if !defined(WEBRTC_LITTLE_ENDIAN) && !defined(WEBRTC_BIG_ENDIAN)
-#error Either WEBRTC_LITTLE_ENDIAN or WEBRTC_BIG_ENDIAN must be defined
-#endif
-
/* for RTP/RTCP
All integer fields are carried in network byte order, that is, most
significant byte (octet) first. AKA big-endian.
*/
void AssignUWord32ToBuffer(uint8_t* dataBuffer, uint32_t value) {
-#if defined(WEBRTC_LITTLE_ENDIAN)
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
dataBuffer[0] = static_cast<uint8_t>(value >> 24);
dataBuffer[1] = static_cast<uint8_t>(value >> 16);
dataBuffer[2] = static_cast<uint8_t>(value >> 8);
@@ -146,7 +142,7 @@
}
void AssignUWord24ToBuffer(uint8_t* dataBuffer, uint32_t value) {
-#if defined(WEBRTC_LITTLE_ENDIAN)
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
dataBuffer[0] = static_cast<uint8_t>(value >> 16);
dataBuffer[1] = static_cast<uint8_t>(value >> 8);
dataBuffer[2] = static_cast<uint8_t>(value);
@@ -158,7 +154,7 @@
}
void AssignUWord16ToBuffer(uint8_t* dataBuffer, uint16_t value) {
-#if defined(WEBRTC_LITTLE_ENDIAN)
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
dataBuffer[0] = static_cast<uint8_t>(value >> 8);
dataBuffer[1] = static_cast<uint8_t>(value);
#else
@@ -168,7 +164,7 @@
}
uint16_t BufferToUWord16(const uint8_t* dataBuffer) {
-#if defined(WEBRTC_LITTLE_ENDIAN)
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
return (dataBuffer[0] << 8) + dataBuffer[1];
#else
return *reinterpret_cast<const uint16_t*>(dataBuffer);
@@ -180,7 +176,7 @@
}
uint32_t BufferToUWord32(const uint8_t* dataBuffer) {
-#if defined(WEBRTC_LITTLE_ENDIAN)
+#if defined(WEBRTC_ARCH_LITTLE_ENDIAN)
return (dataBuffer[0] << 24) + (dataBuffer[1] << 16) + (dataBuffer[2] << 8) +
dataBuffer[3];
#else
diff --git a/modules/utility/source/rtp_dump_impl.cc b/modules/utility/source/rtp_dump_impl.cc
index 1f8715d..39316f4 100644
--- a/modules/utility/source/rtp_dump_impl.cc
+++ b/modules/utility/source/rtp_dump_impl.cc
@@ -245,37 +245,25 @@
gettimeofday(&tv, &tz);
val = tv.tv_sec * 1000 + tv.tv_usec / 1000;
return val;
-#else
- #error Either _WIN32 or LINUX or WEBRTC_MAC has to be defined!
- assert(false);
- return 0;
#endif
}
inline uint32_t RtpDumpImpl::RtpDumpHtonl(uint32_t x) const
{
-#if defined(WEBRTC_BIG_ENDIAN)
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
return x;
-#elif defined(WEBRTC_LITTLE_ENDIAN)
+#elif defined(WEBRTC_ARCH_LITTLE_ENDIAN)
return (x >> 24) + ((((x >> 16) & 0xFF) << 8) + ((((x >> 8) & 0xFF) << 16) +
((x & 0xFF) << 24)));
-#else
-#error Either WEBRTC_BIG_ENDIAN or WEBRTC_LITTLE_ENDIAN has to be defined!
- assert(false);
- return 0;
#endif
}
inline uint16_t RtpDumpImpl::RtpDumpHtons(uint16_t x) const
{
-#if defined(WEBRTC_BIG_ENDIAN)
+#if defined(WEBRTC_ARCH_BIG_ENDIAN)
return x;
-#elif defined(WEBRTC_LITTLE_ENDIAN)
+#elif defined(WEBRTC_ARCH_LITTLE_ENDIAN)
return (x >> 8) + ((x & 0xFF) << 8);
-#else
- #error Either WEBRTC_BIG_ENDIAN or WEBRTC_LITTLE_ENDIAN has to be defined!
- assert(false);
- return 0;
#endif
}
} // namespace webrtc
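The little-endian branches above can be sanity-checked in isolation; the sketch below mirrors RtpDumpHtonl/RtpDumpHtons outside the class, and the helper names are invented for the example.

  #include <assert.h>
  #include <stdint.h>

  static uint32_t SketchHtonl(uint32_t x) {  // Little-endian path of RtpDumpHtonl.
    return (x >> 24) + ((((x >> 16) & 0xFF) << 8) +
                        ((((x >> 8) & 0xFF) << 16) + ((x & 0xFF) << 24)));
  }

  static uint16_t SketchHtons(uint16_t x) {  // Little-endian path of RtpDumpHtons.
    return (x >> 8) + ((x & 0xFF) << 8);
  }

  int main() {
    assert(SketchHtonl(0x11223344u) == 0x44332211u);
    assert(SketchHtons(0xABCD) == 0xCDAB);
    return 0;
  }
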
diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc
index f4abecb..a23d22b 100644
--- a/modules/video_capture/video_capture_impl.cc
+++ b/modules/video_capture/video_capture_impl.cc
@@ -164,7 +164,7 @@
_captureCallBack(NULL),
_lastProcessFrameCount(TickTime::Now()),
_rotateFrame(kRotateNone),
- last_capture_time_(TickTime::MillisecondTimestamp()),
+ last_capture_time_(0),
delta_ntp_internal_ms_(
Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() -
TickTime::MillisecondTimestamp()) {
diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/modules/video_coding/codecs/vp8/default_temporal_layers.cc
index 62d9ae6..a92a87a 100644
--- a/modules/video_coding/codecs/vp8/default_temporal_layers.cc
+++ b/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -36,6 +36,13 @@
memset(temporal_pattern_, 0, sizeof(temporal_pattern_));
}
+int DefaultTemporalLayers::CurrentLayerId() const {
+ assert(temporal_ids_length_ > 0);
+ int index = pattern_idx_ % temporal_ids_length_;
+ assert(index >= 0);
+ return temporal_ids_[index];
+ }
+
bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
int max_bitrate_kbit,
int framerate,
@@ -247,8 +254,7 @@
vp8_info->temporalIdx = 0;
vp8_info->layerSync = true;
} else {
- vp8_info->temporalIdx = temporal_ids_
- [pattern_idx_ % temporal_ids_length_];
+ vp8_info->temporalIdx = CurrentLayerId();
TemporalReferences temporal_reference =
temporal_pattern_[pattern_idx_ % temporal_pattern_length_];
diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers.h b/modules/video_coding/codecs/vp8/default_temporal_layers.h
index 99b0e95..61f281f 100644
--- a/modules/video_coding/codecs/vp8/default_temporal_layers.h
+++ b/modules/video_coding/codecs/vp8/default_temporal_layers.h
@@ -37,6 +37,8 @@
virtual void FrameEncoded(unsigned int size, uint32_t timestamp) {}
+ virtual int CurrentLayerId() const;
+
private:
enum TemporalReferences {
// For 1 layer case: reference all (last, golden, and alt ref), but only
diff --git a/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc b/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
index ba7412b..f16c756 100644
--- a/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
+++ b/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
@@ -189,7 +189,7 @@
return encode_flags_[index];
}
- unsigned int CurrentLayerId() const {
+ virtual int CurrentLayerId() const {
assert(layer_ids_length_ > 0 && layer_ids_ != NULL);
int index = frame_counter_ % layer_ids_length_;
assert(index >= 0 && index < layer_ids_length_);
diff --git a/modules/video_coding/codecs/vp8/temporal_layers.h b/modules/video_coding/codecs/vp8/temporal_layers.h
index 4549e28..7ca4840 100644
--- a/modules/video_coding/codecs/vp8/temporal_layers.h
+++ b/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -49,6 +49,8 @@
uint32_t timestamp) = 0;
virtual void FrameEncoded(unsigned int size, uint32_t timestamp) = 0;
+
+ virtual int CurrentLayerId() const = 0;
};
// Factory for a temporal layers strategy that adaptively changes the number of
diff --git a/modules/video_coding/main/test/rtp_player.cc b/modules/video_coding/main/test/rtp_player.cc
index 6af4389..9965143 100644
--- a/modules/video_coding/main/test/rtp_player.cc
+++ b/modules/video_coding/main/test/rtp_player.cc
@@ -326,7 +326,6 @@
float loss_rate, uint32_t rtt_ms, bool reordering)
: ssrc_handlers_(payload_sink_factory, payload_types),
clock_(clock),
- packet_source_(NULL),
next_rtp_time_(0),
first_packet_(true),
first_packet_rtp_time_(0),
diff --git a/system_wrappers/interface/asm_defines.h b/system_wrappers/interface/asm_defines.h
index c432ec0..4b839a9 100644
--- a/system_wrappers/interface/asm_defines.h
+++ b/system_wrappers/interface/asm_defines.h
@@ -56,4 +56,4 @@
.text
-#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ASM_DEFINES_H_
diff --git a/system_wrappers/interface/compile_assert.h b/system_wrappers/interface/compile_assert.h
index 4feda86..0c8776d 100644
--- a/system_wrappers/interface/compile_assert.h
+++ b/system_wrappers/interface/compile_assert.h
@@ -8,14 +8,73 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+// Borrowed from Chromium's src/base/basictypes.h.
+
#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
-/* Use this macro to verify at compile time that certain restrictions are met.
- * The argument is the boolean expression to evaluate.
- * Example:
- * COMPILE_ASSERT(sizeof(foo) < 128);
-*/
-#define COMPILE_ASSERT(expression) switch(0){case 0: case expression:;}
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+// content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+template <bool>
+struct CompileAssert {
+};
+
+#undef COMPILE_ASSERT
+#define COMPILE_ASSERT(expr, msg) \
+ typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+// elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+// does not work, as gcc supports variable-length arrays whose sizes
+// are determined at run-time (this is gcc's extension and not part
+// of the C++ standard). As a result, gcc fails to reject the
+// following code with the simple definition:
+//
+// int foo;
+// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+// // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+// expr is a compile-time constant. (Template arguments must be
+// determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
+//
+// CompileAssert<bool(expr)>
+//
+// instead, these compilers will refuse to compile
+//
+// COMPILE_ASSERT(5 > 0, some_message);
+//
+// (They seem to think the ">" in "5 > 0" marks the end of the
+// template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+// ((expr) ? 1 : -1).
+//
+// This is to avoid running into a bug in MS VC 7.1, which
+// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
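A small usage sketch of the rewritten two-argument macro (kTable and kTableSize are hypothetical, not taken from WebRTC):

  #include "webrtc/system_wrappers/interface/compile_assert.h"

  const int kTableSize = 4;
  const int kTable[] = {1, 2, 3, 4};
  COMPILE_ASSERT(sizeof(kTable) / sizeof(kTable[0]) == kTableSize,
                 kTable_has_wrong_size);

If the expression were false, the typedef would declare an array of size -1 and the resulting compiler error would mention kTable_has_wrong_size, which is the point of the added message argument.
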
diff --git a/system_wrappers/interface/compile_assert_c.h b/system_wrappers/interface/compile_assert_c.h
new file mode 100644
index 0000000..d9ba866
--- /dev/null
+++ b/system_wrappers/interface/compile_assert_c.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
+
+// Only use this for C files. For C++, use compile_assert.h.
+//
+// Use this macro to verify at compile time that certain restrictions are met.
+// The argument is the boolean expression to evaluate.
+// Example:
+// COMPILE_ASSERT(sizeof(foo) < 128);
+#define COMPILE_ASSERT(expression) switch (0) {case 0: case expression:;}
+
+#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_COMPILE_ASSERT_H_
diff --git a/system_wrappers/interface/move.h b/system_wrappers/interface/move.h
new file mode 100644
index 0000000..d828c32
--- /dev/null
+++ b/system_wrappers/interface/move.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/move.h.
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTEFACE_MOVE_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTEFACE_MOVE_H_
+
+// Macro with the boilerplate that makes a type move-only in C++03.
+//
+// USAGE
+//
+// This macro should be used instead of DISALLOW_COPY_AND_ASSIGN to create
+// a "move-only" type. Unlike DISALLOW_COPY_AND_ASSIGN, this macro should be
+// the first line in a class declaration.
+//
+// A class using this macro must call .Pass() (or somehow be an r-value already)
+// before it can be:
+//
+// * Passed as a function argument
+// * Used as the right-hand side of an assignment
+// * Returned from a function
+//
+// Each class will still need to define its own "move constructor" and "move
+// operator=" to make this useful. Here's an example of the macro, the move
+// constructor, and the move operator= from the scoped_ptr class:
+//
+// template <typename T>
+// class scoped_ptr {
+// MOVE_ONLY_TYPE_FOR_CPP_03(scoped_ptr, RValue)
+// public:
+// scoped_ptr(RValue& other) : ptr_(other.release()) { }
+// scoped_ptr& operator=(RValue& other) {
+// swap(other);
+// return *this;
+// }
+// };
+//
+// Note that the constructor must NOT be marked explicit.
+//
+// For consistency, the second parameter to the macro should always be RValue
+// unless you have a strong reason to do otherwise. It is only exposed as a
+// macro parameter so that the move constructor and move operator= don't look
+// like they're using a phantom type.
+//
+//
+// HOW THIS WORKS
+//
+// For a thorough explanation of this technique, see:
+//
+// http://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Move_Constructor
+//
+// The summary is that we take advantage of 2 properties:
+//
+// 1) non-const references will not bind to r-values.
+// 2) C++ can apply one user-defined conversion when initializing a
+// variable.
+//
+// The first lets us disable the copy constructor and assignment operator
+// by declaring private versions of them with a non-const reference parameter.
+//
+// For l-values, direct initialization still fails like in
+// DISALLOW_COPY_AND_ASSIGN because the copy constructor and assignment
+// operators are private.
+//
+// For r-values, the situation is different. The copy constructor and
+// assignment operator are not viable due to (1), so we are trying to call
+// a non-existent constructor and non-existent operator= rather than a private
+// one. Since we have not committed an error quite yet, we can provide an
+// alternate conversion sequence and a constructor. We add
+//
+// * a private struct named "RValue"
+// * a user-defined conversion "operator RValue()"
+// * a "move constructor" and "move operator=" that take the RValue& as
+// their sole parameter.
+//
+// Only r-values will trigger this sequence and execute our "move constructor"
+// or "move operator=." L-values will match the private copy constructor and
+// operator= first, giving a "private in this context" error. This combination
+// gives us a move-only type.
+//
+// For signaling a destructive transfer of data from an l-value, we provide a
+// method named Pass() which creates an r-value for the current instance
+// triggering the move constructor or move operator=.
+//
+// Another way to get an r-value is to use the result of an expression such
+// as a function call.
+//
+// Here's an example with comments explaining what gets triggered where:
+//
+// class Foo {
+// MOVE_ONLY_TYPE_FOR_CPP_03(Foo, RValue);
+//
+// public:
+// ... API ...
+// Foo(RValue other); // Move constructor.
+// Foo& operator=(RValue rhs); // Move operator=
+// };
+//
+// Foo MakeFoo(); // Function that returns a Foo.
+//
+// Foo f;
+// Foo f_copy(f); // ERROR: Foo(Foo&) is private in this context.
+// Foo f_assign;
+// f_assign = f; // ERROR: operator=(Foo&) is private in this context.
+//
+//
+// Foo f(MakeFoo()); // R-value so alternate conversion executed.
+// Foo f_copy(f.Pass()); // R-value so alternate conversion executed.
+// f = f_copy.Pass(); // R-value so alternate conversion executed.
+//
+//
+// IMPLEMENTATION SUBTLETIES WITH RValue
+//
+// The RValue struct is just a container for a pointer back to the original
+// object. It should only ever be created as a temporary, and no external
+// class should ever declare it or use it in a parameter.
+//
+// It is tempting to want to use the RValue type in function parameters, but
+// excluding the limited usage here for the move constructor and move
+// operator=, doing so would mean that the function could take both r-values
+// and l-values equally, which is unexpected. See COMPARED TO Boost.Move for
+// more details.
+//
+// An alternate, and incorrect, implementation of the RValue class used by
+// Boost.Move makes RValue a fieldless child of the move-only type. RValue&
+// is then used in place of RValue in the various operators. The RValue& is
+// "created" by doing *reinterpret_cast<RValue*>(this). This has the appeal
+// of never creating a temporary RValue struct even with optimizations
+// disabled. Also, by virtue of inheritance you can treat the RValue
+// reference as if it were the move-only type itself. Unfortunately,
+// using the result of this reinterpret_cast<> is actually undefined behavior
+// due to C++98 5.2.10.7. In certain compilers (e.g., NaCl) the optimizer
+// will generate non-working code.
+//
+// In optimized builds, both implementations generate the same assembly so we
+// choose the one that adheres to the standard.
+//
+//
+// COMPARED TO C++11
+//
+// In C++11, you would implement this functionality using an r-value reference
+// and our .Pass() method would be replaced with a call to std::move().
+//
+// This emulation also has a deficiency where it uses up the single
+// user-defined conversion allowed by C++ during initialization. This can
+// cause problems in some API edge cases. For instance, in scoped_ptr, it is
+// impossible to make a function "void Foo(scoped_ptr<Parent> p)" accept a
+// value of type scoped_ptr<Child> even if you add a constructor to
+// scoped_ptr<> that would make it look like it should work. C++11 does not
+// have this deficiency.
+//
+//
+// COMPARED TO Boost.Move
+//
+// Our implementation is similar to Boost.Move, but we keep the RValue struct
+// private to the move-only type, and we don't use the reinterpret_cast<> hack.
+//
+// In Boost.Move, RValue is the boost::rv<> template. This type can be used
+// when writing APIs like:
+//
+// void MyFunc(boost::rv<Foo>& f)
+//
+// that can take advantage of rv<> to avoid extra copies of a type. However you
+// would still be able to call this version of MyFunc with an l-value:
+//
+// Foo f;
+// MyFunc(f); // Uh oh, we probably just destroyed |f| w/o calling Pass().
+//
+// unless someone is very careful to also declare a parallel override like:
+//
+// void MyFunc(const Foo& f)
+//
+// that would catch the l-values first. This was declared unsafe in C++11 and
+// a C++11 compiler will explicitly fail MyFunc(f). Unfortunately, we cannot
+// ensure this in C++03.
+//
+// Since we have no need for writing such APIs yet, our implementation keeps
+// RValue private and uses a .Pass() method to do the conversion instead of
+// trying to write a version of "std::move()." Writing an API like std::move()
+// would require the RValue struct to be public.
+//
+//
+// CAVEATS
+//
+// If you include a move-only type as a field inside a class that does not
+// explicitly declare a copy constructor, the containing class's implicit
+// copy constructor will change from Containing(const Containing&) to
+// Containing(Containing&). This can cause some unexpected errors.
+//
+// http://llvm.org/bugs/show_bug.cgi?id=11528
+//
+// The workaround is to explicitly declare your copy constructor.
+//
+#define MOVE_ONLY_TYPE_FOR_CPP_03(type, rvalue_type) \
+ private: \
+ struct rvalue_type { \
+ explicit rvalue_type(type* object) : object(object) {} \
+ type* object; \
+ }; \
+ type(type&); \
+ void operator=(type&); \
+ public: \
+ operator rvalue_type() { return rvalue_type(this); } \
+ type Pass() { return type(rvalue_type(this)); } \
+ private:
+
+#endif // WEBRTC_SYSTEM_WRAPPERS_INTEFACE_MOVE_H_
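A minimal sketch, not taken from WebRTC, of a class built with the new macro; FileHandle and its fd bookkeeping are invented for illustration and follow the Foo/scoped_ptr pattern documented in the header.

  class FileHandle {
    MOVE_ONLY_TYPE_FOR_CPP_03(FileHandle, RValue)

   public:
    explicit FileHandle(int fd) : fd_(fd) {}
    // Move constructor and move operator=, as prescribed by the macro's
    // documentation. Note the move constructor is not explicit.
    FileHandle(RValue other) : fd_(other.object->fd_) { other.object->fd_ = -1; }
    FileHandle& operator=(RValue rhs) {
      fd_ = rhs.object->fd_;
      rhs.object->fd_ = -1;
      return *this;
    }
    int fd() const { return fd_; }

   private:
    int fd_;
  };

  // FileHandle a(3);
  // FileHandle b(a.Pass());  // OK: explicit transfer via Pass().
  // FileHandle c(a);         // Does not compile: copy constructor is private.
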
diff --git a/system_wrappers/interface/scoped_ptr.h b/system_wrappers/interface/scoped_ptr.h
index cfaf5cb..a2a1b44 100644
--- a/system_wrappers/interface/scoped_ptr.h
+++ b/system_wrappers/interface/scoped_ptr.h
@@ -1,118 +1,581 @@
-// (C) Copyright Greg Colvin and Beman Dawes 1998, 1999.
-// Copyright (c) 2001, 2002 Peter Dimov
-//
-// Permission to copy, use, modify, sell and distribute this software
-// is granted provided this copyright notice appears in all copies.
-// This software is provided "as is" without express or implied
-// warranty, and with no claim as to its suitability for any purpose.
-//
-// See http://www.boost.org/libs/smart_ptr/scoped_ptr.htm for documentation.
-//
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
-// scoped_ptr mimics a built-in pointer except that it guarantees deletion
-// of the object pointed to, either on destruction of the scoped_ptr or via
-// an explicit reset(). scoped_ptr is a simple solution for simple needs;
-// use shared_ptr or std::auto_ptr if your needs are more complex.
+// Borrowed from Chromium's src/base/memory/scoped_ptr.h.
-// scoped_ptr_malloc added in by Google. When one of
-// these goes out of scope, instead of doing a delete or delete[], it
-// calls free(). scoped_ptr_malloc<char> is likely to see much more
-// use than any other specializations.
+// Scopers help you manage ownership of a pointer, helping you easily manage
+// a pointer within a scope, and automatically destroying the pointer at the
+// end of a scope. There are two main classes you will use, which correspond
+// to the operators new/delete and new[]/delete[].
+//
+// Example usage (scoped_ptr<T>):
+// {
+// scoped_ptr<Foo> foo(new Foo("wee"));
+// } // foo goes out of scope, releasing the pointer with it.
+//
+// {
+// scoped_ptr<Foo> foo; // No pointer managed.
+// foo.reset(new Foo("wee")); // Now a pointer is managed.
+// foo.reset(new Foo("wee2")); // Foo("wee") was destroyed.
+// foo.reset(new Foo("wee3")); // Foo("wee2") was destroyed.
+// foo->Method(); // Foo::Method() called.
+// foo.get()->Method(); // Foo::Method() called.
+// SomeFunc(foo.release()); // SomeFunc takes ownership, foo no longer
+// // manages a pointer.
+// foo.reset(new Foo("wee4")); // foo manages a pointer again.
+// foo.reset(); // Foo("wee4") destroyed, foo no longer
+// // manages a pointer.
+// } // foo wasn't managing a pointer, so nothing was destroyed.
+//
+// Example usage (scoped_ptr<T[]>):
+// {
+// scoped_ptr<Foo[]> foo(new Foo[100]);
+// foo.get()->Method(); // Foo::Method on the 0th element.
+// foo[10].Method(); // Foo::Method on the 10th element.
+// }
+//
+// These scopers also implement part of the functionality of C++11 unique_ptr
+// in that they are "movable but not copyable." You can use the scopers in
+// the parameter and return types of functions to signify ownership transfer
+// in to and out of a function. When calling a function that has a scoper
+// as the argument type, it must be called with the result of an analogous
+// scoper's Pass() function or another function that generates a temporary;
+// passing by copy will NOT work. Here is an example using scoped_ptr:
+//
+// void TakesOwnership(scoped_ptr<Foo> arg) {
+// // Do something with arg
+// }
+// scoped_ptr<Foo> CreateFoo() {
+// // No need for calling Pass() because we are constructing a temporary
+// // for the return value.
+// return scoped_ptr<Foo>(new Foo("new"));
+// }
+// scoped_ptr<Foo> PassThru(scoped_ptr<Foo> arg) {
+// return arg.Pass();
+// }
+//
+// {
+// scoped_ptr<Foo> ptr(new Foo("yay")); // ptr manages Foo("yay").
+// TakesOwnership(ptr.Pass()); // ptr no longer owns Foo("yay").
+// scoped_ptr<Foo> ptr2 = CreateFoo(); // ptr2 owns the return Foo.
+// scoped_ptr<Foo> ptr3 = // ptr3 now owns what was in ptr2.
+// PassThru(ptr2.Pass()); // ptr2 is correspondingly NULL.
+// }
+//
+// Notice that if you do not call Pass() when returning from PassThru(), or
+// when invoking TakesOwnership(), the code will not compile because scopers
+// are not copyable; they only implement move semantics which require calling
+// the Pass() function to signify a destructive transfer of state. CreateFoo()
+// is different though because we are constructing a temporary on the return
+// line and thus can avoid needing to call Pass().
+//
+// Pass() properly handles upcast in initialization, i.e. you can use a
+// scoped_ptr<Child> to initialize a scoped_ptr<Parent>:
+//
+// scoped_ptr<Foo> foo(new Foo());
+// scoped_ptr<FooParent> parent(foo.Pass());
+//
+// PassAs<>() should be used to upcast return value in return statement:
+//
+// scoped_ptr<Foo> CreateFoo() {
+// scoped_ptr<FooChild> result(new FooChild());
+// return result.PassAs<Foo>();
+// }
+//
+// Note that PassAs<>() is implemented only for scoped_ptr<T>, but not for
+// scoped_ptr<T[]>. This is because casting array pointers may not be safe.
-// release() added in by Google. Use this to conditionally
-// transfer ownership of a heap-allocated object to the caller, usually on
-// method success.
#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
-#include <assert.h> // for assert
-#include <stddef.h> // for ptrdiff_t
-#include <stdlib.h> // for free() decl
+// This is an implementation designed to match the anticipated future TR2
+// implementation of the scoped_ptr class and scoped_ptr_malloc (deprecated).
-#ifdef _WIN32
-namespace std { using ::ptrdiff_t; };
-#endif // _WIN32
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <algorithm> // For std::swap().
+
+#include "webrtc/system_wrappers/interface/compile_assert.h"
+#include "webrtc/system_wrappers/interface/constructor_magic.h"
+#include "webrtc/system_wrappers/interface/move.h"
+#include "webrtc/system_wrappers/interface/template_util.h"
+#include "webrtc/typedefs.h"
namespace webrtc {
-template <typename T>
-class scoped_ptr {
- private:
-
- T* ptr;
-
- scoped_ptr(scoped_ptr const &);
- scoped_ptr & operator=(scoped_ptr const &);
-
- public:
-
- typedef T element_type;
-
- explicit scoped_ptr(T* p = NULL): ptr(p) {}
-
- ~scoped_ptr() {
- typedef char type_must_be_complete[sizeof(T)];
+// Function object which deletes its parameter, which must be a pointer.
+// If C is an array type, invokes 'delete[]' on the parameter; otherwise,
+// invokes 'delete'. The default deleter for scoped_ptr<T>.
+template <class T>
+struct DefaultDeleter {
+ DefaultDeleter() {}
+ template <typename U> DefaultDeleter(const DefaultDeleter<U>& other) {
+ // IMPLEMENTATION NOTE: C++11 20.7.1.1.2p2 only provides this constructor
+ // if U* is implicitly convertible to T* and U is not an array type.
+ //
+ // Correct implementation should use SFINAE to disable this
+ // constructor. However, since there are no other 1-argument constructors,
+ // using a COMPILE_ASSERT() based on is_convertible<> and requiring
+ // complete types is simpler and will cause compile failures for equivalent
+ // misuses.
+ //
+ // Note, the is_convertible<U*, T*> check also ensures that U is not an
+ // array. T is guaranteed to be a non-array, so any U* where U is an array
+ // cannot convert to T*.
+ enum { T_must_be_complete = sizeof(T) };
+ enum { U_must_be_complete = sizeof(U) };
+ COMPILE_ASSERT((webrtc::is_convertible<U*, T*>::value),
+ U_ptr_must_implicitly_convert_to_T_ptr);
+ }
+ inline void operator()(T* ptr) const {
+ enum { type_must_be_complete = sizeof(T) };
delete ptr;
}
-
- void reset(T* p = NULL) {
- typedef char type_must_be_complete[sizeof(T)];
-
- if (ptr != p) {
- T* obj = ptr;
- ptr = p;
- // Delete last, in case obj destructor indirectly results in ~scoped_ptr
- delete obj;
- }
- }
-
- T& operator*() const {
- assert(ptr != NULL);
- return *ptr;
- }
-
- T* operator->() const {
- assert(ptr != NULL);
- return ptr;
- }
-
- T* get() const {
- return ptr;
- }
-
- void swap(scoped_ptr & b) {
- T* tmp = b.ptr;
- b.ptr = ptr;
- ptr = tmp;
- }
-
- T* release() {
- T* tmp = ptr;
- ptr = NULL;
- return tmp;
- }
-
- T** accept() {
- if (ptr) {
- delete ptr;
- ptr = NULL;
- }
- return &ptr;
- }
-
- T** use() {
- return &ptr;
- }
};
-template<typename T> inline
-void swap(scoped_ptr<T>& a, scoped_ptr<T>& b) {
- a.swap(b);
+// Specialization of DefaultDeleter for array types.
+template <class T>
+struct DefaultDeleter<T[]> {
+ inline void operator()(T* ptr) const {
+ enum { type_must_be_complete = sizeof(T) };
+ delete[] ptr;
+ }
+
+ private:
+ // Disable this operator for any U != T because it is undefined to execute
+ // an array delete when the static type of the array mismatches the dynamic
+ // type.
+ //
+ // References:
+ // C++98 [expr.delete]p3
+ // http://cplusplus.github.com/LWG/lwg-defects.html#938
+ template <typename U> void operator()(U* array) const;
+};
+
+template <class T, int n>
+struct DefaultDeleter<T[n]> {
+ // Never allow someone to declare something like scoped_ptr<int[10]>.
+ COMPILE_ASSERT(sizeof(T) == -1, do_not_use_array_with_size_as_type);
+};
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in scoped_ptr:
+//
+// scoped_ptr<int, webrtc::FreeDeleter> foo_ptr(
+// static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+ inline void operator()(void* ptr) const {
+ free(ptr);
+ }
+};
+
+namespace internal {
+
+// Minimal implementation of the core logic of scoped_ptr, suitable for
+// reuse in both scoped_ptr and its specializations.
+template <class T, class D>
+class scoped_ptr_impl {
+ public:
+ explicit scoped_ptr_impl(T* p) : data_(p) { }
+
+ // Initializer for deleters that have data parameters.
+ scoped_ptr_impl(T* p, const D& d) : data_(p, d) {}
+
+ // Templated constructor that destructively takes the value from another
+ // scoped_ptr_impl.
+ template <typename U, typename V>
+ scoped_ptr_impl(scoped_ptr_impl<U, V>* other)
+ : data_(other->release(), other->get_deleter()) {
+ // We do not support move-only deleters. We could modify our move
+ // emulation to have webrtc::subtle::move() and webrtc::subtle::forward()
+ // functions that are imperfect emulations of their C++11 equivalents,
+ // but until there's a requirement, just assume deleters are copyable.
+ }
+
+ template <typename U, typename V>
+ void TakeState(scoped_ptr_impl<U, V>* other) {
+ // See comment in templated constructor above regarding lack of support
+ // for move-only deleters.
+ reset(other->release());
+ get_deleter() = other->get_deleter();
+ }
+
+ ~scoped_ptr_impl() {
+ if (data_.ptr != NULL) {
+ // Not using get_deleter() saves one function call in non-optimized
+ // builds.
+ static_cast<D&>(data_)(data_.ptr);
+ }
+ }
+
+ void reset(T* p) {
+ // This is a self-reset, which is no longer allowed: http://crbug.com/162971
+ if (p != NULL && p == data_.ptr)
+ abort();
+
+ // Note that running data_.ptr = p can lead to undefined behavior if
+ // get_deleter()(get()) deletes this. In order to prevent this, reset()
+ // should update the stored pointer before deleting its old value.
+ //
+ // However, changing reset() to use that behavior may cause current code to
+ // break in unexpected ways. If the destruction of the owned object
+ // dereferences the scoped_ptr when it is destroyed by a call to reset(),
+ // then it will incorrectly dispatch calls to |p| rather than the original
+ // value of |data_.ptr|.
+ //
+ // During the transition period, set the stored pointer to NULL while
+ // deleting the object. Eventually, this safety check will be removed to
+ // prevent the scenario initially described from occurring and
+ // http://crbug.com/176091 can be closed.
+ T* old = data_.ptr;
+ data_.ptr = NULL;
+ if (old != NULL)
+ static_cast<D&>(data_)(old);
+ data_.ptr = p;
+ }
+
+ T* get() const { return data_.ptr; }
+
+ D& get_deleter() { return data_; }
+ const D& get_deleter() const { return data_; }
+
+ void swap(scoped_ptr_impl& p2) {
+ // Standard swap idiom: 'using std::swap' ensures that std::swap is
+ // present in the overload set, but we call swap unqualified so that
+ // any more-specific overloads can be used, if available.
+ using std::swap;
+ swap(static_cast<D&>(data_), static_cast<D&>(p2.data_));
+ swap(data_.ptr, p2.data_.ptr);
+ }
+
+ T* release() {
+ T* old_ptr = data_.ptr;
+ data_.ptr = NULL;
+ return old_ptr;
+ }
+
+ private:
+ // Needed to allow type-converting constructor.
+ template <typename U, typename V> friend class scoped_ptr_impl;
+
+ // Use the empty base class optimization to allow us to have a D
+ // member, while avoiding any space overhead for it when D is an
+ // empty class. See e.g. http://www.cantrip.org/emptyopt.html for a good
+ // discussion of this technique.
+ struct Data : public D {
+ explicit Data(T* ptr_in) : ptr(ptr_in) {}
+ Data(T* ptr_in, const D& other) : D(other), ptr(ptr_in) {}
+ T* ptr;
+ };
+
+ Data data_;
+
+ DISALLOW_COPY_AND_ASSIGN(scoped_ptr_impl);
+};
+
+} // namespace internal
+
+// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
+// automatically deletes the pointer it holds (if any).
+// That is, scoped_ptr<T> owns the T object that it points to.
+// Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to a T object.
+// Also like T*, scoped_ptr<T> is thread-compatible, and once you
+// dereference it, you get the thread safety guarantees of T.
+//
+// The size of scoped_ptr is small. On most compilers, when using the
+// DefaultDeleter, sizeof(scoped_ptr<T>) == sizeof(T*). Custom deleters will
+// increase the size proportional to whatever state they need to have. See
+// comments inside scoped_ptr_impl<> for details.
+//
+// Current implementation targets having a strict subset of C++11's
+// unique_ptr<> features. Known deficiencies include not supporting move-only
+// deleters, function pointers as deleters, and deleters with reference
+// types.
+template <class T, class D = webrtc::DefaultDeleter<T> >
+class scoped_ptr {
+ MOVE_ONLY_TYPE_FOR_CPP_03(scoped_ptr, RValue)
+
+ public:
+ // The element and deleter types.
+ typedef T element_type;
+ typedef D deleter_type;
+
+ // Constructor. Defaults to initializing with NULL.
+ scoped_ptr() : impl_(NULL) { }
+
+ // Constructor. Takes ownership of p.
+ explicit scoped_ptr(element_type* p) : impl_(p) { }
+
+ // Constructor. Allows initialization of a stateful deleter.
+ scoped_ptr(element_type* p, const D& d) : impl_(p, d) { }
+
+ // Constructor. Allows construction from a scoped_ptr rvalue for a
+ // convertible type and deleter.
+ //
+ // IMPLEMENTATION NOTE: C++11 unique_ptr<> keeps this constructor distinct
+ // from the normal move constructor. By C++11 20.7.1.2.1.21, this constructor
+ // has different post-conditions if D is a reference type. Since this
+ // implementation does not support deleters with reference type,
+ // we do not need a separate move constructor allowing us to avoid one
+ // use of SFINAE. You only need to care about this if you modify the
+ // implementation of scoped_ptr.
+ template <typename U, typename V>
+ scoped_ptr(scoped_ptr<U, V> other) : impl_(&other.impl_) {
+ COMPILE_ASSERT(!webrtc::is_array<U>::value, U_cannot_be_an_array);
+ }
+
+ // Constructor. Move constructor for C++03 move emulation of this type.
+ scoped_ptr(RValue rvalue) : impl_(&rvalue.object->impl_) { }
+
+ // operator=. Allows assignment from a scoped_ptr rvalue for a convertible
+ // type and deleter.
+ //
+ // IMPLEMENTATION NOTE: C++11 unique_ptr<> keeps this operator= distinct from
+ // the normal move assignment operator. By C++11 20.7.1.2.3.4, this templated
+ // form has different requirements for move-only Deleters. Since this
+ // implementation does not support move-only Deleters, we do not need a
+ // separate move assignment operator allowing us to avoid one use of SFINAE.
+ // You only need to care about this if you modify the implementation of
+ // scoped_ptr.
+ template <typename U, typename V>
+ scoped_ptr& operator=(scoped_ptr<U, V> rhs) {
+ COMPILE_ASSERT(!webrtc::is_array<U>::value, U_cannot_be_an_array);
+ impl_.TakeState(&rhs.impl_);
+ return *this;
+ }
+
+ // Reset. Deletes the currently owned object, if any.
+ // Then takes ownership of a new object, if given.
+ void reset(element_type* p = NULL) { impl_.reset(p); }
+
+ // Accessors to get the owned object.
+ // operator* and operator-> will assert() if there is no current object.
+ element_type& operator*() const {
+ assert(impl_.get() != NULL);
+ return *impl_.get();
+ }
+ element_type* operator->() const {
+ assert(impl_.get() != NULL);
+ return impl_.get();
+ }
+ element_type* get() const { return impl_.get(); }
+
+ // Access to the deleter.
+ deleter_type& get_deleter() { return impl_.get_deleter(); }
+ const deleter_type& get_deleter() const { return impl_.get_deleter(); }
+
+ // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
+ // implicitly convertible to a real bool (which is dangerous).
+ //
+ // Note that this trick is only safe when the == and != operators
+ // are declared explicitly, as otherwise "scoped_ptr1 ==
+ // scoped_ptr2" will compile but do the wrong thing (i.e., convert
+ // to Testable and then do the comparison).
+ private:
+ typedef webrtc::internal::scoped_ptr_impl<element_type, deleter_type>
+ scoped_ptr::*Testable;
+
+ public:
+ operator Testable() const { return impl_.get() ? &scoped_ptr::impl_ : NULL; }
+
+ // Comparison operators.
+ // These return whether two scoped_ptr refer to the same object, not just to
+ // two different but equal objects.
+ bool operator==(const element_type* p) const { return impl_.get() == p; }
+ bool operator!=(const element_type* p) const { return impl_.get() != p; }
+
+ // Swap two scoped pointers.
+ void swap(scoped_ptr& p2) {
+ impl_.swap(p2.impl_);
+ }
+
+ // Release a pointer.
+ // The return value is the current pointer held by this object.
+ // If this object holds a NULL pointer, the return value is NULL.
+ // After this operation, this object will hold a NULL pointer,
+ // and will not own the object any more.
+ element_type* release() WARN_UNUSED_RESULT {
+ return impl_.release();
+ }
+
+ // C++98 doesn't support functions templates with default parameters which
+ // makes it hard to write a PassAs() that understands converting the deleter
+ // while preserving simple calling semantics.
+ //
+ // Until there is a use case for PassAs() with custom deleters, just ignore
+ // the custom deleter.
+ template <typename PassAsType>
+ scoped_ptr<PassAsType> PassAs() {
+ return scoped_ptr<PassAsType>(Pass());
+ }
+
+ private:
+ // Needed to reach into |impl_| in the constructor.
+ template <typename U, typename V> friend class scoped_ptr;
+ webrtc::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
+
+ // Forbidden for API compatibility with std::unique_ptr.
+ explicit scoped_ptr(int disallow_construction_from_null);
+
+ // Forbid comparison of scoped_ptr types. If U != T, it totally
+ // doesn't make sense, and if U == T, it still doesn't make sense
+ // because you should never have the same object owned by two different
+ // scoped_ptrs.
+ template <class U> bool operator==(scoped_ptr<U> const& p2) const;
+ template <class U> bool operator!=(scoped_ptr<U> const& p2) const;
+};
+
+template <class T, class D>
+class scoped_ptr<T[], D> {
+ MOVE_ONLY_TYPE_FOR_CPP_03(scoped_ptr, RValue)
+
+ public:
+ // The element and deleter types.
+ typedef T element_type;
+ typedef D deleter_type;
+
+ // Constructor. Defaults to initializing with NULL.
+ scoped_ptr() : impl_(NULL) { }
+
+ // Constructor. Stores the given array. Note that the argument's type
+ // must exactly match T*. In particular:
+ // - it cannot be a pointer to a type derived from T, because it is
+ // inherently unsafe in the general case to access an array through a
+ // pointer whose dynamic type does not match its static type (eg., if
+ // T and the derived types had different sizes access would be
+ // incorrectly calculated). Deletion is also always undefined
+ // (C++98 [expr.delete]p3). If you're doing this, fix your code.
+ // - it cannot be NULL, because NULL is an integral expression, not a
+ // pointer to T. Use the no-argument version instead of explicitly
+ // passing NULL.
+ // - it cannot be const-qualified differently from T per unique_ptr spec
+ // (http://cplusplus.github.com/LWG/lwg-active.html#2118). Users wanting
+ // to work around this may use implicit_cast<const T*>().
+ // However, because of the first bullet in this comment, users MUST
+ // NOT use implicit_cast<Base*>() to upcast the static type of the array.
+ explicit scoped_ptr(element_type* array) : impl_(array) { }
+
+ // Constructor. Move constructor for C++03 move emulation of this type.
+ scoped_ptr(RValue rvalue) : impl_(&rvalue.object->impl_) { }
+
+ // operator=. Move operator= for C++03 move emulation of this type.
+ scoped_ptr& operator=(RValue rhs) {
+ impl_.TakeState(&rhs.object->impl_);
+ return *this;
+ }
+
+ // Reset. Deletes the currently owned array, if any.
+ // Then takes ownership of a new object, if given.
+ void reset(element_type* array = NULL) { impl_.reset(array); }
+
+ // Accessors to get the owned array.
+ element_type& operator[](size_t i) const {
+ assert(impl_.get() != NULL);
+ return impl_.get()[i];
+ }
+ element_type* get() const { return impl_.get(); }
+
+ // Access to the deleter.
+ deleter_type& get_deleter() { return impl_.get_deleter(); }
+ const deleter_type& get_deleter() const { return impl_.get_deleter(); }
+
+ // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
+ // implicitly convertible to a real bool (which is dangerous).
+ private:
+ typedef webrtc::internal::scoped_ptr_impl<element_type, deleter_type>
+ scoped_ptr::*Testable;
+
+ public:
+ operator Testable() const { return impl_.get() ? &scoped_ptr::impl_ : NULL; }
+
+ // Comparison operators.
+ // These return whether two scoped_ptr refer to the same object, not just to
+ // two different but equal objects.
+ bool operator==(element_type* array) const { return impl_.get() == array; }
+ bool operator!=(element_type* array) const { return impl_.get() != array; }
+
+ // Swap two scoped pointers.
+ void swap(scoped_ptr& p2) {
+ impl_.swap(p2.impl_);
+ }
+
+ // Release a pointer.
+ // The return value is the current pointer held by this object.
+ // If this object holds a NULL pointer, the return value is NULL.
+ // After this operation, this object will hold a NULL pointer,
+ // and will not own the object any more.
+ element_type* release() WARN_UNUSED_RESULT {
+ return impl_.release();
+ }
+
+ private:
+ // Force element_type to be a complete type.
+ enum { type_must_be_complete = sizeof(element_type) };
+
+ // Actually hold the data.
+ webrtc::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
+
+ // Disable initialization from any type other than element_type*, by
+ // providing a constructor that matches such an initialization, but is
+ // private and has no definition. This is disabled because it is not safe to
+ // call delete[] on an array whose static type does not match its dynamic
+ // type.
+ template <typename U> explicit scoped_ptr(U* array);
+ explicit scoped_ptr(int disallow_construction_from_null);
+
+ // Disable reset() from any type other than element_type*, for the same
+ // reasons as the constructor above.
+ template <typename U> void reset(U* array);
+ void reset(int disallow_reset_from_null);
+
+ // Forbid comparison of scoped_ptr types. If U != T, it totally
+ // doesn't make sense, and if U == T, it still doesn't make sense
+ // because you should never have the same object owned by two different
+ // scoped_ptrs.
+ template <class U> bool operator==(scoped_ptr<U> const& p2) const;
+ template <class U> bool operator!=(scoped_ptr<U> const& p2) const;
+};
+
+} // namespace webrtc
+
+// Free functions
+template <class T, class D>
+void swap(webrtc::scoped_ptr<T, D>& p1, webrtc::scoped_ptr<T, D>& p2) {
+ p1.swap(p2);
}
+template <class T, class D>
+bool operator==(T* p1, const webrtc::scoped_ptr<T, D>& p2) {
+ return p1 == p2.get();
+}
+template <class T, class D>
+bool operator!=(T* p1, const webrtc::scoped_ptr<T, D>& p2) {
+ return p1 != p2.get();
+}
+// A function to convert T* into webrtc::scoped_ptr<T>
+// Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for webrtc::scoped_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+webrtc::scoped_ptr<T> make_scoped_ptr(T* ptr) {
+ return webrtc::scoped_ptr<T>(ptr);
+}
+namespace webrtc {
+
+// DEPRECATED: Use scoped_ptr<T[]> instead.
+// TODO(ajm): Remove scoped_array.
+//
// scoped_array extends scoped_ptr to arrays. Deletion of the array pointed to
// is guaranteed, either on destruction of the scoped_array or via an explicit
// reset(). Use shared_array or std::vector if your needs are more complex.
@@ -184,6 +647,9 @@
a.swap(b);
}
+// DEPRECATED: Use scoped_ptr<C, webrtc::FreeDeleter> instead.
+// TODO(ajm): Remove scoped_ptr_malloc.
+//
// scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a
// second template argument, the function used to free the object.
@@ -254,4 +720,4 @@
} // namespace webrtc
-#endif // #ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
+#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_SCOPED_PTR_H_
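A short sketch of the features this rewrite adds over the old Boost-derived scoped_ptr (the buffer sizes and values below are arbitrary):

  #include <stdlib.h>
  #include "webrtc/system_wrappers/interface/scoped_ptr.h"

  void ScopedPtrSketch() {
    webrtc::scoped_ptr<int[]> samples(new int[480]);  // Array form, uses delete[].
    samples[0] = 0;

    webrtc::scoped_ptr<char, webrtc::FreeDeleter> c_buf(
        static_cast<char*>(malloc(64)));              // Released with free().

    webrtc::scoped_ptr<int> a(new int(5));
    webrtc::scoped_ptr<int> b = a.Pass();             // Ownership moves; a now holds NULL.
    webrtc::scoped_ptr<int> c = make_scoped_ptr(new int(7));
  }
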
diff --git a/system_wrappers/interface/template_util.h b/system_wrappers/interface/template_util.h
new file mode 100644
index 0000000..5ae415b
--- /dev/null
+++ b/system_wrappers/interface/template_util.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/template_util.h.
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_TEMPLATE_UTIL_H_
+#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_TEMPLATE_UTIL_H_
+
+#include <cstddef> // For size_t.
+
+namespace webrtc {
+
+// Template definitions from tr1.
+
+template<class T, T v>
+struct integral_constant {
+ static const T value = v;
+ typedef T value_type;
+ typedef integral_constant<T, v> type;
+};
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
+template <class T> struct is_pointer : false_type {};
+template <class T> struct is_pointer<T*> : true_type {};
+
+template <class T, class U> struct is_same : public false_type {};
+template <class T> struct is_same<T, T> : true_type {};
+
+template<class> struct is_array : public false_type {};
+template<class T, size_t n> struct is_array<T[n]> : public true_type {};
+template<class T> struct is_array<T[]> : public true_type {};
+
+template <class T> struct is_non_const_reference : false_type {};
+template <class T> struct is_non_const_reference<T&> : true_type {};
+template <class T> struct is_non_const_reference<const T&> : false_type {};
+
+template <class T> struct is_void : false_type {};
+template <> struct is_void<void> : true_type {};
+
+namespace internal {
+
+// Types YesType and NoType are guaranteed such that sizeof(YesType) <
+// sizeof(NoType).
+typedef char YesType;
+
+struct NoType {
+ YesType dummy[2];
+};
+
+// This class is an implementation detail for is_convertible, and you
+// don't need to know how it works to use is_convertible. For those
+// who care: we declare two different functions, one whose argument is
+// of type To and one with a variadic argument list. We give them
+// return types of different size, so we can use sizeof to trick the
+// compiler into telling us which function it would have chosen if we
+// had called it with an argument of type From. See Alexandrescu's
+// _Modern C++ Design_ for more details on this sort of trick.
+
+struct ConvertHelper {
+ template <typename To>
+ static YesType Test(To);
+
+ template <typename To>
+ static NoType Test(...);
+
+ template <typename From>
+ static From& Create();
+};
+
+// Used to determine if a type is a struct/union/class. Inspired by Boost's
+// is_class type_trait implementation.
+struct IsClassHelper {
+ template <typename C>
+ static YesType Test(void(C::*)(void));
+
+ template <typename C>
+ static NoType Test(...);
+};
+
+} // namespace internal
+
+// Inherits from true_type if From is convertible to To, false_type otherwise.
+//
+// Note that if the type is convertible, this will be a true_type REGARDLESS
+// of whether or not the conversion would emit a warning.
+template <typename From, typename To>
+struct is_convertible
+ : integral_constant<bool,
+ sizeof(internal::ConvertHelper::Test<To>(
+ internal::ConvertHelper::Create<From>())) ==
+ sizeof(internal::YesType)> {
+};
+
+template <typename T>
+struct is_class
+ : integral_constant<bool,
+ sizeof(internal::IsClassHelper::Test<T>(0)) ==
+ sizeof(internal::YesType)> {
+};
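+
+// A usage sketch (call sites are hypothetical; COMPILE_ASSERT is WebRTC's
+// compile-time assert macro, used e.g. in atomic32_win.cc):
+//
+//   COMPILE_ASSERT(is_pointer<int*>::value, pointers_are_pointers);
+//   COMPILE_ASSERT((is_convertible<int, double>::value), int_converts);
+//   COMPILE_ASSERT(!is_non_const_reference<const int&>::value,
+//                  const_refs_are_not_non_const);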
+
+} // namespace webrtc
+
+#endif // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_TEMPLATE_UTIL_H_
diff --git a/system_wrappers/source/atomic32_win.cc b/system_wrappers/source/atomic32_win.cc
index 5dd0709..7c70376 100644
--- a/system_wrappers/source/atomic32_win.cc
+++ b/system_wrappers/source/atomic32_win.cc
@@ -20,9 +20,8 @@
Atomic32::Atomic32(int32_t initial_value)
: value_(initial_value) {
- // Make sure that the counter variable we're using is of the same size
- // as what the API expects.
- COMPILE_ASSERT(sizeof(value_) == sizeof(LONG));
+ COMPILE_ASSERT(sizeof(value_) == sizeof(LONG),
+ counter_variable_is_the_expected_size);
assert(Is32bitAligned());
}
diff --git a/test/test_suite.cc b/test/test_suite.cc
index c8ff742..7cfb856 100644
--- a/test/test_suite.cc
+++ b/test/test_suite.cc
@@ -21,8 +21,7 @@
namespace webrtc {
namespace test {
-TestSuite::TestSuite(int argc, char** argv)
- : trace_to_stderr_(NULL) {
+TestSuite::TestSuite(int argc, char** argv) {
SetExecutablePath(argv[0]);
testing::InitGoogleMock(&argc, argv); // Runs InitGoogleTest() internally.
// AllowCommandLineParsing allows us to ignore flags passed on to us by
diff --git a/tools/e2e_quality/e2e_quality.gyp b/tools/e2e_quality/e2e_quality.gyp
deleted file mode 100644
index 6ddc3a8..0000000
--- a/tools/e2e_quality/e2e_quality.gyp
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS. All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
- 'includes': ['../../build/common.gypi'],
- 'targets': [
- {
- 'target_name': 'audio_e2e_harness',
- 'type': 'executable',
- 'dependencies': [
- '<(webrtc_root)/test/test.gyp:channel_transport',
- '<(webrtc_root)/voice_engine/voice_engine.gyp:voice_engine',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
- ],
- 'sources': [
- 'audio/audio_e2e_harness.cc',
- ],
- },
- ],
-}
diff --git a/tools/tools.gyp b/tools/tools.gyp
index fa79046..b8dc4c1 100644
--- a/tools/tools.gyp
+++ b/tools/tools.gyp
@@ -101,6 +101,19 @@
['include_tests==1', {
'targets' : [
{
+ 'target_name': 'audio_e2e_harness',
+ 'type': 'executable',
+ 'dependencies': [
+ '<(webrtc_root)/test/test.gyp:channel_transport',
+ '<(webrtc_root)/voice_engine/voice_engine.gyp:voice_engine',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
+ ],
+ 'sources': [
+ 'e2e_quality/audio/audio_e2e_harness.cc',
+ ],
+ }, # audio_e2e_harness
+ {
'target_name': 'tools_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
diff --git a/typedefs.h b/typedefs.h
index 37c8fc9..dc206f2 100644
--- a/typedefs.h
+++ b/typedefs.h
@@ -25,38 +25,37 @@
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
-// TODO(andrew): replace WEBRTC_LITTLE_ENDIAN with WEBRTC_ARCH_LITTLE_ENDIAN.
#if defined(_M_X64) || defined(__x86_64__)
#define WEBRTC_ARCH_X86_FAMILY
#define WEBRTC_ARCH_X86_64
#define WEBRTC_ARCH_64_BITS
#define WEBRTC_ARCH_LITTLE_ENDIAN
-#define WEBRTC_LITTLE_ENDIAN
#elif defined(_M_IX86) || defined(__i386__)
#define WEBRTC_ARCH_X86_FAMILY
#define WEBRTC_ARCH_X86
#define WEBRTC_ARCH_32_BITS
#define WEBRTC_ARCH_LITTLE_ENDIAN
-#define WEBRTC_LITTLE_ENDIAN
#elif defined(__ARMEL__)
-// TODO(andrew): We'd prefer to control platform defines here, but this is
+// TODO(ajm): We'd prefer to control platform defines here, but this is
// currently provided by the Android makefiles. Commented to avoid duplicate
// definition warnings.
//#define WEBRTC_ARCH_ARM
-// TODO(andrew): Chromium uses the following two defines. Should we switch?
+// TODO(ajm): Chromium uses the following two defines. Should we switch?
//#define WEBRTC_ARCH_ARM_FAMILY
//#define WEBRTC_ARCH_ARMEL
#define WEBRTC_ARCH_32_BITS
#define WEBRTC_ARCH_LITTLE_ENDIAN
-#define WEBRTC_LITTLE_ENDIAN
#elif defined(__MIPSEL__)
#define WEBRTC_ARCH_32_BITS
#define WEBRTC_ARCH_LITTLE_ENDIAN
-#define WEBRTC_LITTLE_ENDIAN
#else
#error Please add support for your architecture in typedefs.h
#endif
+#if !(defined(WEBRTC_ARCH_LITTLE_ENDIAN) ^ defined(WEBRTC_ARCH_BIG_ENDIAN))
+#error Define either WEBRTC_ARCH_LITTLE_ENDIAN or WEBRTC_ARCH_BIG_ENDIAN
+#endif
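+// A big-endian port would be expected to add the matching defines above,
+// for example (sketch only; no such target is configured here):
+//   #elif defined(__MIPSEB__)
+//   #define WEBRTC_ARCH_32_BITS
+//   #define WEBRTC_ARCH_BIG_ENDIAN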
+
#if defined(__SSE2__) || defined(_MSC_VER)
#define WEBRTC_USE_SSE2
#endif
@@ -93,4 +92,13 @@
#define OVERRIDE
#endif
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+// int foo() WARN_UNUSED_RESULT;
+#if defined(__GNUC__)
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
#endif // WEBRTC_TYPEDEFS_H_
diff --git a/video_engine/include/vie_codec.h b/video_engine/include/vie_codec.h
index 976aee7..33ada28 100644
--- a/video_engine/include/vie_codec.h
+++ b/video_engine/include/vie_codec.h
@@ -39,7 +39,7 @@
// This method is called whenever the state of the AutoMuter changes, i.e.,
// when |is_muted| toggles.
// TODO(hlundin): Remove the default implementation when possible.
- virtual void VideoAutoMuted(bool is_muted) {}
+ virtual void VideoAutoMuted(int video_channel, bool is_muted) {}
protected:
virtual ~ViEEncoderObserver() {}
diff --git a/video_engine/include/vie_image_process.h b/video_engine/include/vie_image_process.h
index cb66bb1..9a12748 100644
--- a/video_engine/include/vie_image_process.h
+++ b/video_engine/include/vie_image_process.h
@@ -22,6 +22,8 @@
namespace webrtc {
+class I420FrameCallback;
+
class VideoEngine;
// This class declares an abstract interface for a user defined effect filter.
@@ -90,6 +92,17 @@
virtual int EnableColorEnhancement(const int video_channel,
const bool enable) = 0;
+ // New-style callbacks, used by VideoSendStream/VideoReceiveStream.
+ virtual void RegisterPreEncodeCallback(
+ int video_channel,
+ I420FrameCallback* pre_encode_callback) = 0;
+ virtual void DeRegisterPreEncodeCallback(int video_channel) = 0;
+
+ virtual void RegisterPreRenderCallback(
+ int video_channel,
+ I420FrameCallback* pre_render_callback) = 0;
+ virtual void DeRegisterPreRenderCallback(int video_channel) = 0;
+
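+ // Registration sketch (MyFilter and channel are illustrative names only):
+ //
+ //   class MyFilter : public I420FrameCallback {
+ //    public:
+ //     virtual void FrameCallback(I420VideoFrame* frame) { /* edit pixels */ }
+ //   };
+ //   MyFilter filter;
+ //   image_process->RegisterPreEncodeCallback(channel, &filter);
+ //   ...
+ //   image_process->DeRegisterPreEncodeCallback(channel);
+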
protected:
ViEImageProcess() {}
virtual ~ViEImageProcess() {}
diff --git a/video_engine/internal/video_receive_stream.cc b/video_engine/internal/video_receive_stream.cc
index fc742c7..a6a5875 100644
--- a/video_engine/internal/video_receive_stream.cc
+++ b/video_engine/internal/video_receive_stream.cc
@@ -19,6 +19,7 @@
#include "webrtc/video_engine/include/vie_capture.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_external_codec.h"
+#include "webrtc/video_engine/include/vie_image_process.h"
#include "webrtc/video_engine/include/vie_network.h"
#include "webrtc/video_engine/include/vie_render.h"
#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
@@ -42,6 +43,14 @@
// TODO(pbos): This is not fine grained enough...
rtp_rtcp_->SetNACKStatus(channel_, config_.rtp.nack.rtp_history_ms > 0);
rtp_rtcp_->SetKeyFrameRequestMethod(channel_, kViEKeyFrameRequestPliRtcp);
+ switch (config_.rtp.rtcp_mode) {
+ case newapi::kRtcpCompound:
+ rtp_rtcp_->SetRTCPStatus(channel_, kRtcpCompound_RFC4585);
+ break;
+ case newapi::kRtcpReducedSize:
+ rtp_rtcp_->SetRTCPStatus(channel_, kRtcpNonCompound_RFC5506);
+ break;
+ }
assert(config_.rtp.ssrc != 0);
@@ -78,21 +87,28 @@
render_ = webrtc::ViERender::GetInterface(video_engine);
assert(render_ != NULL);
- if (render_->AddRenderer(channel_, kVideoI420, this) != 0) {
- abort();
- }
+ render_->AddRenderer(channel_, kVideoI420, this);
+
+ image_process_ = ViEImageProcess::GetInterface(video_engine);
+ image_process_->RegisterPreRenderCallback(channel_,
+ config_.pre_render_callback);
clock_ = Clock::GetRealTimeClock();
}
VideoReceiveStream::~VideoReceiveStream() {
+ image_process_->DeRegisterPreRenderCallback(channel_);
+
render_->RemoveRenderer(channel_);
+
for (size_t i = 0; i < config_.external_decoders.size(); ++i) {
external_codec_->DeRegisterExternalReceiveCodec(
channel_, config_.external_decoders[i].payload_type);
}
+
network_->DeregisterSendTransport(channel_);
+ image_process_->Release();
video_engine_base_->Release();
external_codec_->Release();
codec_->Release();
@@ -162,15 +178,8 @@
video_frame.set_timestamp(timestamp);
video_frame.set_render_time_ms(render_time);
- if (config_.post_decode_callback != NULL) {
- config_.post_decode_callback->FrameCallback(&video_frame);
- }
-
- if (config_.renderer != NULL) {
- // TODO(pbos): Add timing to RenderFrame call
- config_.renderer->RenderFrame(video_frame,
- render_time - clock_->TimeInMilliseconds());
- }
+ config_.renderer->RenderFrame(video_frame,
+ render_time - clock_->TimeInMilliseconds());
return 0;
}
diff --git a/video_engine/internal/video_receive_stream.h b/video_engine/internal/video_receive_stream.h
index b6b5319..b5d095c 100644
--- a/video_engine/internal/video_receive_stream.h
+++ b/video_engine/internal/video_receive_stream.h
@@ -25,6 +25,7 @@
class ViEBase;
class ViECodec;
class ViEExternalCodec;
+class ViEImageProcess;
class ViENetwork;
class ViERender;
class ViERTP_RTCP;
@@ -66,6 +67,7 @@
ViENetwork* network_;
ViERender* render_;
ViERTP_RTCP* rtp_rtcp_;
+ ViEImageProcess* image_process_;
int channel_;
diff --git a/video_engine/internal/video_send_stream.cc b/video_engine/internal/video_send_stream.cc
index 55921c2..d238b95 100644
--- a/video_engine/internal/video_send_stream.cc
+++ b/video_engine/internal/video_send_stream.cc
@@ -19,6 +19,7 @@
#include "webrtc/video_engine/include/vie_capture.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_external_codec.h"
+#include "webrtc/video_engine/include/vie_image_process.h"
#include "webrtc/video_engine/include/vie_network.h"
#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
#include "webrtc/video_engine/new_include/video_send_stream.h"
@@ -190,11 +191,16 @@
video_engine_base_->RegisterCpuOveruseObserver(channel_,
overuse_observer_.get());
}
+
+ image_process_ = ViEImageProcess::GetInterface(video_engine);
+ image_process_->RegisterPreEncodeCallback(channel_,
+ config_.pre_encode_callback);
}
VideoSendStream::~VideoSendStream() {
+ image_process_->DeRegisterPreEncodeCallback(channel_);
+
network_->DeregisterSendTransport(channel_);
- video_engine_base_->DeleteChannel(channel_);
capture_->DisconnectCaptureDevice(channel_);
capture_->ReleaseCaptureDevice(capture_id_);
@@ -204,6 +210,9 @@
config_.codec.plType);
}
+ video_engine_base_->DeleteChannel(channel_);
+
+ image_process_->Release();
video_engine_base_->Release();
capture_->Release();
codec_->Release();
@@ -220,10 +229,6 @@
I420VideoFrame frame_copy;
frame_copy.CopyFrame(frame);
- if (config_.pre_encode_callback != NULL) {
- config_.pre_encode_callback->FrameCallback(&frame_copy);
- }
-
ViEVideoFrameI420 vf;
// TODO(pbos): This represents a memcpy step and is only required because
diff --git a/video_engine/internal/video_send_stream.h b/video_engine/internal/video_send_stream.h
index 1241b48..27027ef 100644
--- a/video_engine/internal/video_send_stream.h
+++ b/video_engine/internal/video_send_stream.h
@@ -26,6 +26,7 @@
class ViECodec;
class ViEExternalCapture;
class ViEExternalCodec;
+class ViEImageProcess;
class ViENetwork;
class ViERTP_RTCP;
@@ -72,6 +73,7 @@
ViEExternalCodec* external_codec_;
ViENetwork* network_;
ViERTP_RTCP* rtp_rtcp_;
+ ViEImageProcess* image_process_;
int channel_;
int capture_id_;
diff --git a/video_engine/new_include/config.h b/video_engine/new_include/config.h
index d19c8d9..fec34ba 100644
--- a/video_engine/new_include/config.h
+++ b/video_engine/new_include/config.h
@@ -29,15 +29,6 @@
std::string c_name;
};
-namespace newapi {
-// RTCP mode to use. Compound mode is described by RFC 4585 and reduced-size
-// RTCP mode is described by RFC 5506.
-enum RtcpMode {
- kRtcpCompound,
- kRtcpReducedSize
-};
-} // namespace newapi
-
// Settings for NACK, see RFC 4585 for details.
struct NackConfig {
NackConfig() : rtp_history_ms(0) {}
diff --git a/video_engine/new_include/video_receive_stream.h b/video_engine/new_include/video_receive_stream.h
index 0235ee0..b5b47c5 100644
--- a/video_engine/new_include/video_receive_stream.h
+++ b/video_engine/new_include/video_receive_stream.h
@@ -22,6 +22,15 @@
namespace webrtc {
+namespace newapi {
+// RTCP mode to use. Compound mode is described by RFC 4585 and reduced-size
+// RTCP mode is described by RFC 5506.
+enum RtcpMode {
+ kRtcpCompound,
+ kRtcpReducedSize
+};
+} // namespace newapi
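+// Selected per receive stream through VideoReceiveStream::Config, e.g.
+// (sketch):
+//   VideoReceiveStream::Config config;
+//   config.rtp.rtcp_mode = newapi::kRtcpCompound;  // Default is reduced-size.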
+
class VideoDecoder;
// TODO(mflodman) Move all these settings to VideoDecoder and move the
@@ -89,18 +98,22 @@
render_delay_ms(0),
audio_channel_id(0),
pre_decode_callback(NULL),
- post_decode_callback(NULL),
+ pre_render_callback(NULL),
target_delay_ms(0) {}
// Codecs the receive stream can receive.
std::vector<VideoCodec> codecs;
// Receive-stream specific RTP settings.
struct Rtp {
- Rtp() : ssrc(0) {}
+ Rtp() : ssrc(0), rtcp_mode(newapi::kRtcpReducedSize) {}
+
// TODO(mflodman) Do we require a set ssrc? What happens if the ssrc
// changes?
uint32_t ssrc;
+ // See RtcpMode for description.
+ newapi::RtcpMode rtcp_mode;
+
// See NackConfig for description.
NackConfig nack;
@@ -137,7 +150,7 @@
// Called for each decoded frame. E.g. used when adding effects to the
// decoded stream. 'NULL' disables the callback.
- I420FrameCallback* post_decode_callback;
+ I420FrameCallback* pre_render_callback;
// External video decoders to be used if incoming payload type matches the
// registered type for an external decoder.
diff --git a/video_engine/new_include/video_send_stream.h b/video_engine/new_include/video_send_stream.h
index 5c0ec32..c85ed78 100644
--- a/video_engine/new_include/video_send_stream.h
+++ b/video_engine/new_include/video_send_stream.h
@@ -86,9 +86,7 @@
static const size_t kDefaultMaxPacketSize = 1500 - 40; // TCP over IPv4.
struct Rtp {
- Rtp() : mode(newapi::kRtcpReducedSize),
- max_packet_size(kDefaultMaxPacketSize) {}
- newapi::RtcpMode mode;
+ Rtp() : max_packet_size(kDefaultMaxPacketSize) {}
std::vector<uint32_t> ssrcs;
diff --git a/video_engine/test/auto_test/source/vie_autotest_codec.cc b/video_engine/test/auto_test/source/vie_autotest_codec.cc
index d242831..84ae14c 100644
--- a/video_engine/test/auto_test/source/vie_autotest_codec.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_codec.cc
@@ -88,7 +88,7 @@
last_outgoing_bitrate_ += bitrate;
}
- virtual void VideoAutoMuted(bool is_muted) {
+ virtual void VideoAutoMuted(int video_channel, bool is_muted) {
video_auto_muted_called_++;
}
diff --git a/video_engine/test/auto_test/source/vie_autotest_custom_call.cc b/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
index 5d2ecfc..103e422 100644
--- a/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_custom_call.cc
@@ -73,7 +73,7 @@
<< " BR: " << bitrate << std::endl;
}
- virtual void VideoAutoMuted(bool is_muted) {
+ virtual void VideoAutoMuted(int video_channel, bool is_muted) {
std::cout << "VideoAutoMuted: " << is_muted << std::endl;
}
};
diff --git a/video_engine/test/call_tests.cc b/video_engine/test/call_tests.cc
index bb10906..49cb8f5 100644
--- a/video_engine/test/call_tests.cc
+++ b/video_engine/test/call_tests.cc
@@ -13,6 +13,7 @@
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common_video/test/frame_generator.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
@@ -28,6 +29,9 @@
namespace webrtc {
+static unsigned int kDefaultTimeoutMs = 30 * 1000;
+static unsigned int kLongTimeoutMs = 120 * 1000;
+
class CallTest : public ::testing::Test {
public:
CallTest()
@@ -85,11 +89,13 @@
void StartSending() {
receive_stream_->StartReceive();
send_stream_->StartSend();
- frame_generator_capturer_->Start();
+ if (frame_generator_capturer_.get() != NULL)
+ frame_generator_capturer_->Start();
}
void StopSending() {
- frame_generator_capturer_->Stop();
+ if (frame_generator_capturer_.get() != NULL)
+ frame_generator_capturer_->Stop();
if (send_stream_ != NULL)
send_stream_->StopSend();
if (receive_stream_ != NULL)
@@ -106,6 +112,7 @@
}
void ReceivesPliAndRecovers(int rtp_history_ms);
+ void RespectsRtcpMode(newapi::RtcpMode rtcp_mode);
scoped_ptr<Call> sender_call_;
scoped_ptr<Call> receiver_call_;
@@ -131,7 +138,7 @@
public:
NackObserver()
- : test::RtpRtcpObserver(120 * 1000),
+ : test::RtpRtcpObserver(kLongTimeoutMs),
rtp_parser_(RtpHeaderParser::Create()),
drop_burst_count_(0),
sent_rtp_packets_(0),
@@ -229,7 +236,7 @@
TEST_F(CallTest, UsesTraceCallback) {
const unsigned int kSenderTraceFilter = kTraceDebug;
const unsigned int kReceiverTraceFilter = kTraceDefault & (~kTraceDebug);
- class TraceObserver: public TraceCallback {
+ class TraceObserver : public TraceCallback {
public:
TraceObserver(unsigned int filter)
: filter_(filter), messages_left_(50), done_(EventWrapper::Create()) {}
@@ -242,7 +249,7 @@
done_->Set();
}
- EventTypeWrapper Wait() { return done_->Wait(30 * 1000); }
+ EventTypeWrapper Wait() { return done_->Wait(kDefaultTimeoutMs); }
private:
unsigned int filter_;
@@ -281,6 +288,50 @@
receiver_call_.reset();
}
+TEST_F(CallTest, TransmitsFirstFrame) {
+ class Renderer : public VideoRenderer {
+ public:
+ Renderer() : event_(EventWrapper::Create()) {}
+
+ virtual void RenderFrame(const I420VideoFrame& video_frame,
+ int /*time_to_render_ms*/) OVERRIDE {
+ event_->Set();
+ }
+
+ EventTypeWrapper Wait() { return event_->Wait(kDefaultTimeoutMs); }
+
+ scoped_ptr<EventWrapper> event_;
+ } renderer;
+
+ test::DirectTransport sender_transport, receiver_transport;
+
+ CreateCalls(Call::Config(&sender_transport),
+ Call::Config(&receiver_transport));
+
+ sender_transport.SetReceiver(receiver_call_->Receiver());
+ receiver_transport.SetReceiver(sender_call_->Receiver());
+
+ CreateTestConfigs();
+ receive_config_.renderer = &renderer;
+
+ CreateStreams();
+ StartSending();
+
+ scoped_ptr<test::FrameGenerator> frame_generator(test::FrameGenerator::Create(
+ send_config_.codec.width, send_config_.codec.height));
+ send_stream_->Input()->PutFrame(frame_generator->NextFrame(), 0);
+
+ EXPECT_EQ(kEventSignaled, renderer.Wait())
+ << "Timed out while waiting for the frame to render.";
+
+ StopSending();
+
+ sender_transport.StopSending();
+ receiver_transport.StopSending();
+
+ DestroyStreams();
+}
+
TEST_F(CallTest, ReceivesAndRetransmitsNack) {
NackObserver observer;
@@ -296,7 +347,6 @@
CreateStreams();
CreateFrameGenerator();
-
StartSending();
// Wait() waits for an event triggered when NACKs have been received, NACKed
@@ -310,12 +360,109 @@
DestroyStreams();
}
+TEST_F(CallTest, UsesFrameCallbacks) {
+ static const int kWidth = 320;
+ static const int kHeight = 240;
+
+ class Renderer : public VideoRenderer {
+ public:
+ Renderer() : event_(EventWrapper::Create()) {}
+
+ virtual void RenderFrame(const I420VideoFrame& video_frame,
+ int /*time_to_render_ms*/) OVERRIDE {
+ EXPECT_EQ(0, *video_frame.buffer(kYPlane))
+ << "Rendered frame should have zero luma which is applied by the "
+ "pre-render callback.";
+ event_->Set();
+ }
+
+ EventTypeWrapper Wait() { return event_->Wait(kDefaultTimeoutMs); }
+ scoped_ptr<EventWrapper> event_;
+ } renderer;
+
+ class TestFrameCallback : public I420FrameCallback {
+ public:
+ TestFrameCallback(int expected_luma_byte, int next_luma_byte)
+ : event_(EventWrapper::Create()),
+ expected_luma_byte_(expected_luma_byte),
+ next_luma_byte_(next_luma_byte) {}
+
+ EventTypeWrapper Wait() { return event_->Wait(kDefaultTimeoutMs); }
+
+ private:
+ virtual void FrameCallback(I420VideoFrame* frame) {
+ EXPECT_EQ(kWidth, frame->width())
+ << "Width not as expected, callback done before resize?";
+ EXPECT_EQ(kHeight, frame->height())
+ << "Height not as expected, callback done before resize?";
+
+ // Previous luma specified, observed luma should be fairly close.
+ if (expected_luma_byte_ != -1) {
+ EXPECT_NEAR(expected_luma_byte_, *frame->buffer(kYPlane), 10);
+ }
+
+ memset(frame->buffer(kYPlane),
+ next_luma_byte_,
+ frame->allocated_size(kYPlane));
+
+ event_->Set();
+ }
+
+ scoped_ptr<EventWrapper> event_;
+ int expected_luma_byte_;
+ int next_luma_byte_;
+ };
+
+ TestFrameCallback pre_encode_callback(-1, 255); // Changes luma to 255.
+ TestFrameCallback pre_render_callback(255, 0); // Changes luma from 255 to 0.
+
+ test::DirectTransport sender_transport, receiver_transport;
+
+ CreateCalls(Call::Config(&sender_transport),
+ Call::Config(&receiver_transport));
+
+ sender_transport.SetReceiver(receiver_call_->Receiver());
+ receiver_transport.SetReceiver(sender_call_->Receiver());
+
+ CreateTestConfigs();
+ send_config_.encoder = NULL;
+ send_config_.codec = sender_call_->GetVideoCodecs()[0];
+ send_config_.codec.width = kWidth;
+ send_config_.codec.height = kHeight;
+ send_config_.pre_encode_callback = &pre_encode_callback;
+ receive_config_.pre_render_callback = &pre_render_callback;
+ receive_config_.renderer = &renderer;
+
+ CreateStreams();
+ StartSending();
+
+ // Create frames that are smaller than the send width/height. This checks
+ // that the callbacks are invoked after the frames have been scaled to the
+ // send resolution.
+ scoped_ptr<test::FrameGenerator> frame_generator(
+ test::FrameGenerator::Create(kWidth / 2, kHeight / 2));
+ send_stream_->Input()->PutFrame(frame_generator->NextFrame(), 0);
+
+ EXPECT_EQ(kEventSignaled, pre_encode_callback.Wait())
+ << "Timed out while waiting for pre-encode callback.";
+ EXPECT_EQ(kEventSignaled, pre_render_callback.Wait())
+ << "Timed out while waiting for pre-render callback.";
+ EXPECT_EQ(kEventSignaled, renderer.Wait())
+ << "Timed out while waiting for the frame to render.";
+
+ StopSending();
+
+ sender_transport.StopSending();
+ receiver_transport.StopSending();
+
+ DestroyStreams();
+}
+
class PliObserver : public test::RtpRtcpObserver, public VideoRenderer {
static const int kInverseDropProbability = 16;
public:
explicit PliObserver(bool nack_enabled)
- : test::RtpRtcpObserver(120 * 1000),
+ : test::RtpRtcpObserver(kLongTimeoutMs),
rtp_header_parser_(RtpHeaderParser::Create()),
nack_enabled_(nack_enabled),
first_retransmitted_timestamp_(0),
@@ -399,7 +546,6 @@
CreateStreams();
CreateFrameGenerator();
-
StartSending();
// Wait() waits for an event triggered when Pli has been received and frames
@@ -428,7 +574,9 @@
explicit PacketInputObserver(PacketReceiver* receiver)
: receiver_(receiver), delivered_packet_(EventWrapper::Create()) {}
- EventTypeWrapper Wait() { return delivered_packet_->Wait(30 * 1000); }
+ EventTypeWrapper Wait() {
+ return delivered_packet_->Wait(kDefaultTimeoutMs);
+ }
private:
virtual bool DeliverPacket(const uint8_t* packet, size_t length) {
@@ -457,7 +605,6 @@
CreateStreams();
CreateFrameGenerator();
-
StartSending();
receiver_call_->DestroyReceiveStream(receive_stream_);
@@ -474,6 +621,100 @@
receive_transport.StopSending();
}
+void CallTest::RespectsRtcpMode(newapi::RtcpMode rtcp_mode) {
+ static const int kRtpHistoryMs = 1000;
+ static const int kNumCompoundRtcpPacketsToObserve = 10;
+ class RtcpModeObserver : public test::RtpRtcpObserver {
+ public:
+ RtcpModeObserver(newapi::RtcpMode rtcp_mode)
+ : test::RtpRtcpObserver(kDefaultTimeoutMs),
+ rtcp_mode_(rtcp_mode),
+ sent_rtp_(0),
+ sent_rtcp_(0) {}
+
+ private:
+ virtual Action OnSendRtp(const uint8_t* packet, size_t length) OVERRIDE {
+ if (++sent_rtp_ % 3 == 0)
+ return DROP_PACKET;
+
+ return SEND_PACKET;
+ }
+
+ virtual Action OnReceiveRtcp(const uint8_t* packet,
+ size_t length) OVERRIDE {
+ ++sent_rtcp_;
+ RTCPUtility::RTCPParserV2 parser(packet, length, true);
+ EXPECT_TRUE(parser.IsValid());
+
+ RTCPUtility::RTCPPacketTypes packet_type = parser.Begin();
+ bool has_report_block = false;
+ while (packet_type != RTCPUtility::kRtcpNotValidCode) {
+ EXPECT_NE(RTCPUtility::kRtcpSrCode, packet_type);
+ if (packet_type == RTCPUtility::kRtcpRrCode) {
+ has_report_block = true;
+ break;
+ }
+ packet_type = parser.Iterate();
+ }
+
+ switch (rtcp_mode_) {
+ case newapi::kRtcpCompound:
+ if (!has_report_block) {
+ ADD_FAILURE() << "Received RTCP packet without receiver report for "
+ "kRtcpCompound.";
+ observation_complete_->Set();
+ }
+
+ if (sent_rtcp_ >= kNumCompoundRtcpPacketsToObserve)
+ observation_complete_->Set();
+
+ break;
+ case newapi::kRtcpReducedSize:
+ if (!has_report_block)
+ observation_complete_->Set();
+ break;
+ }
+
+ return SEND_PACKET;
+ }
+
+ newapi::RtcpMode rtcp_mode_;
+ int sent_rtp_;
+ int sent_rtcp_;
+ } observer(rtcp_mode);
+
+ CreateCalls(Call::Config(observer.SendTransport()),
+ Call::Config(observer.ReceiveTransport()));
+
+ observer.SetReceivers(receiver_call_->Receiver(), sender_call_->Receiver());
+
+ CreateTestConfigs();
+ send_config_.rtp.nack.rtp_history_ms = kRtpHistoryMs;
+ receive_config_.rtp.nack.rtp_history_ms = kRtpHistoryMs;
+ receive_config_.rtp.rtcp_mode = rtcp_mode;
+
+ CreateStreams();
+ CreateFrameGenerator();
+ StartSending();
+
+ EXPECT_EQ(kEventSignaled, observer.Wait())
+ << (rtcp_mode == newapi::kRtcpCompound
+ ? "Timed out before observing enough compound packets."
+ : "Timed out before receiving a non-compound RTCP packet.");
+
+ StopSending();
+ observer.StopSending();
+ DestroyStreams();
+}
+
+TEST_F(CallTest, UsesRtcpCompoundMode) {
+ RespectsRtcpMode(newapi::kRtcpCompound);
+}
+
+TEST_F(CallTest, UsesRtcpReducedSizeMode) {
+ RespectsRtcpMode(newapi::kRtcpReducedSize);
+}
+
// Test sets up a Call with multiple senders with different resolutions and
// SSRCs. Another Call is set up to receive all three of these with different
// renderers.
// Each renderer verifies that it receives the expected resolution, and as soon
@@ -493,7 +734,7 @@
done_->Set();
}
- void Wait() { done_->Wait(30 * 1000); }
+ void Wait() { done_->Wait(kDefaultTimeoutMs); }
private:
int width_;
diff --git a/video_engine/test/common/frame_generator_capturer.cc b/video_engine/test/common/frame_generator_capturer.cc
index 02b4a31..08b3d29 100644
--- a/video_engine/test/common/frame_generator_capturer.cc
+++ b/video_engine/test/common/frame_generator_capturer.cc
@@ -66,7 +66,6 @@
sending_(false),
tick_(EventWrapper::Create()),
lock_(CriticalSectionWrapper::CreateCriticalSection()),
- thread_(NULL),
frame_generator_(frame_generator),
target_fps_(target_fps) {
assert(input != NULL);
diff --git a/video_engine/vie_base_impl.cc b/video_engine/vie_base_impl.cc
index 6b9fcfa..ddcfe29 100644
--- a/video_engine/vie_base_impl.cc
+++ b/video_engine/vie_base_impl.cc
@@ -347,7 +347,7 @@
// Add WebRTC Version.
std::stringstream version_stream;
- version_stream << "VideoEngine 3.44.0" << std::endl;
+ version_stream << "VideoEngine 3.45.0" << std::endl;
// Add build info.
version_stream << "Build: svn:" << WEBRTC_SVNREVISION << " " << BUILDINFO
diff --git a/video_engine/vie_channel.cc b/video_engine/vie_channel.cc
index e80cb4b..dabccef 100644
--- a/video_engine/vie_channel.cc
+++ b/video_engine/vie_channel.cc
@@ -29,6 +29,7 @@
#include "webrtc/video_engine/include/vie_errors.h"
#include "webrtc/video_engine/include/vie_image_process.h"
#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
+#include "webrtc/video_engine/new_include/frame_callback.h"
#include "webrtc/video_engine/vie_defines.h"
namespace webrtc {
@@ -73,7 +74,6 @@
callback_cs_(CriticalSectionWrapper::CreateCriticalSection()),
rtp_rtcp_cs_(CriticalSectionWrapper::CreateCriticalSection()),
default_rtp_rtcp_(default_rtp_rtcp),
- rtp_rtcp_(NULL),
vcm_(*VideoCodingModule::Create(ViEModuleId(engine_id, channel_id))),
vie_receiver_(channel_id, &vcm_, remote_bitrate_estimator, this),
vie_sender_(channel_id),
@@ -101,7 +101,8 @@
mtu_(0),
sender_(sender),
nack_history_size_sender_(kSendSidePacketHistorySize),
- max_nack_reordering_threshold_(kMaxPacketAgeToNack) {
+ max_nack_reordering_threshold_(kMaxPacketAgeToNack),
+ pre_render_callback_(NULL) {
WEBRTC_TRACE(kTraceMemory, kTraceVideo, ViEId(engine_id, channel_id),
"ViEChannel::ViEChannel(channel_id: %d, engine_id: %d)",
channel_id, engine_id);
@@ -1599,6 +1600,8 @@
}
// Post processing is not supported if the frame is backed by a texture.
if (video_frame.native_handle() == NULL) {
+ if (pre_render_callback_ != NULL)
+ pre_render_callback_->FrameCallback(&video_frame);
if (effect_filter_) {
unsigned int length = CalcBufferSize(kI420,
video_frame.width(),
@@ -1827,6 +1830,12 @@
return 0;
}
+void ViEChannel::RegisterPreRenderCallback(
+ I420FrameCallback* pre_render_callback) {
+ CriticalSectionScoped cs(callback_cs_.get());
+ pre_render_callback_ = pre_render_callback;
+}
+
void ViEChannel::OnApplicationDataReceived(const int32_t id,
const uint8_t sub_type,
const uint32_t name,
diff --git a/video_engine/vie_channel.h b/video_engine/vie_channel.h
index 9791e10..40ceb1c 100644
--- a/video_engine/vie_channel.h
+++ b/video_engine/vie_channel.h
@@ -34,6 +34,7 @@
class Config;
class CriticalSectionWrapper;
class Encryption;
+class I420FrameCallback;
class PacedSender;
class ProcessThread;
class RtcpRttObserver;
@@ -308,6 +309,9 @@
int32_t RegisterEffectFilter(ViEEffectFilter* effect_filter);
+ // New-style callback, used by VideoReceiveStream.
+ void RegisterPreRenderCallback(I420FrameCallback* pre_render_callback);
+
protected:
static bool ChannelDecodeThreadFunction(void* obj);
bool ChannelDecodeProcess();
@@ -384,6 +388,7 @@
int nack_history_size_sender_;
int max_nack_reordering_threshold_;
+ I420FrameCallback* pre_render_callback_;
};
} // namespace webrtc
diff --git a/video_engine/vie_channel_group.cc b/video_engine/vie_channel_group.cc
index 5673a5a..50527fe 100644
--- a/video_engine/vie_channel_group.cc
+++ b/video_engine/vie_channel_group.cc
@@ -108,7 +108,7 @@
ChannelGroup::ChannelGroup(ProcessThread* process_thread,
const Config& config)
: remb_(new VieRemb()),
- bitrate_controller_(BitrateController::CreateBitrateController()),
+ bitrate_controller_(BitrateController::CreateBitrateController(true)),
call_stats_(new CallStats()),
remote_bitrate_estimator_(new WrappingBitrateEstimator(remb_.get(),
Clock::GetRealTimeClock(), process_thread)),
diff --git a/video_engine/vie_encoder.cc b/video_engine/vie_encoder.cc
index 0d714f3..6295920 100644
--- a/video_engine/vie_encoder.cc
+++ b/video_engine/vie_encoder.cc
@@ -28,6 +28,7 @@
#include "webrtc/system_wrappers/interface/trace_event.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_image_process.h"
+#include "webrtc/video_engine/new_include/frame_callback.h"
#include "webrtc/video_engine/vie_defines.h"
namespace webrtc {
@@ -55,6 +56,8 @@
// VideoEngine API and remove the kTransmissionMaxBitrateMultiplier.
static const int kTransmissionMaxBitrateMultiplier = 2;
+static const float kStopPaddingThresholdMs = 2000;
+
std::vector<uint32_t> AllocateStreamBitrates(
uint32_t total_bitrate,
const SimulcastStream* stream_configs,
@@ -138,10 +141,10 @@
channel_id))),
vpm_(*webrtc::VideoProcessingModule::Create(ViEModuleId(engine_id,
channel_id))),
- default_rtp_rtcp_(NULL),
callback_cs_(CriticalSectionWrapper::CreateCriticalSection()),
data_cs_(CriticalSectionWrapper::CreateCriticalSection()),
bitrate_controller_(bitrate_controller),
+ time_of_last_incoming_frame_ms_(0),
send_padding_(false),
target_delay_ms_(0),
network_is_transmitting_(true),
@@ -159,7 +162,8 @@
has_received_rpsi_(false),
picture_id_rpsi_(0),
qm_callback_(NULL),
- video_auto_muted_(false) {
+ video_auto_muted_(false),
+ pre_encode_callback_(NULL) {
WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceVideo,
ViEId(engine_id, channel_id),
"%s(engine_id: %d) 0x%p - Constructor", __FUNCTION__, engine_id,
@@ -559,6 +563,7 @@
video_frame->timestamp());
{
CriticalSectionScoped cs(data_cs_.get());
+ time_of_last_incoming_frame_ms_ = TickTime::MillisecondTimestamp();
if (default_rtp_rtcp_->SendingMedia() == false) {
// We've paused or we have no channels attached, don't encode.
return;
@@ -646,6 +651,13 @@
if (decimated_frame == NULL) {
decimated_frame = video_frame;
}
+
+ {
+ CriticalSectionScoped cs(callback_cs_.get());
+ if (pre_encode_callback_)
+ pre_encode_callback_->FrameCallback(decimated_frame);
+ }
+
#ifdef VIDEOCODEC_VP8
if (vcm_.SendCodec() == webrtc::kVideoCodecVP8) {
webrtc::CodecSpecificInfo codec_specific_info;
@@ -1078,6 +1090,16 @@
// Disable padding if only sending one stream and video isn't muted.
pad_up_to_bitrate_kbps = 0;
}
+
+ {
+ // Stop requesting padding if no frames have been captured within
+ // kStopPaddingThresholdMs; padding should drop to zero when capture stops.
+ CriticalSectionScoped cs(data_cs_.get());
+ int64_t now_ms = TickTime::MillisecondTimestamp();
+ if (now_ms - time_of_last_incoming_frame_ms_ > kStopPaddingThresholdMs)
+ max_padding_bitrate_kbps = 0;
+ }
+
paced_sender_->UpdateBitrate(bitrate_kbps,
max_padding_bitrate_kbps,
pad_up_to_bitrate_kbps);
@@ -1090,7 +1112,7 @@
ViEId(engine_id_, channel_id_),
"%s: video_auto_muted_ changed to %i",
__FUNCTION__, video_auto_muted_);
- codec_observer_->VideoAutoMuted(video_auto_muted_);
+ codec_observer_->VideoAutoMuted(channel_id_, video_auto_muted_);
}
}
}
@@ -1138,6 +1160,17 @@
vcm_.EnableAutoMuting(threshold_bps, window_bps);
}
+void ViEEncoder::RegisterPreEncodeCallback(
+ I420FrameCallback* pre_encode_callback) {
+ CriticalSectionScoped cs(callback_cs_.get());
+ pre_encode_callback_ = pre_encode_callback;
+}
+
+void ViEEncoder::DeRegisterPreEncodeCallback() {
+ CriticalSectionScoped cs(callback_cs_.get());
+ pre_encode_callback_ = NULL;
+}
+
QMVideoSettingsCallback::QMVideoSettingsCallback(VideoProcessingModule* vpm)
: vpm_(vpm) {
}
diff --git a/video_engine/vie_encoder.h b/video_engine/vie_encoder.h
index 8d18bb6..bb60699 100644
--- a/video_engine/vie_encoder.h
+++ b/video_engine/vie_encoder.h
@@ -21,6 +21,7 @@
#include "webrtc/modules/video_processing/main/interface/video_processing.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
+#include "webrtc/video_engine/new_include/frame_callback.h"
#include "webrtc/video_engine/vie_defines.h"
#include "webrtc/video_engine/vie_frame_provider_base.h"
@@ -167,6 +168,10 @@
// |threshold_bps| + |window_bps|.
virtual void EnableAutoMuting(int threshold_bps, int window_bps);
+ // New-style callback, used by VideoSendStream.
+ void RegisterPreEncodeCallback(I420FrameCallback* pre_encode_callback);
+ void DeRegisterPreEncodeCallback();
+
int channel_id() const { return channel_id_; }
protected:
// Called by BitrateObserver.
@@ -178,7 +183,6 @@
bool TimeToSendPacket(uint32_t ssrc, uint16_t sequence_number,
int64_t capture_time_ms);
int TimeToSendPadding(int bytes);
-
private:
bool EncoderPaused() const;
@@ -197,6 +201,7 @@
BitrateController* bitrate_controller_;
+ int64_t time_of_last_incoming_frame_ms_;
bool send_padding_;
int target_delay_ms_;
bool network_is_transmitting_;
@@ -222,6 +227,7 @@
// Quality modes callback
QMVideoSettingsCallback* qm_callback_;
bool video_auto_muted_;
+ I420FrameCallback* pre_encode_callback_;
};
} // namespace webrtc
diff --git a/video_engine/vie_image_process_impl.cc b/video_engine/vie_image_process_impl.cc
index 199073a..92fe697 100644
--- a/video_engine/vie_image_process_impl.cc
+++ b/video_engine/vie_image_process_impl.cc
@@ -269,4 +269,35 @@
return 0;
}
+void ViEImageProcessImpl::RegisterPreEncodeCallback(
+ int video_channel,
+ I420FrameCallback* pre_encode_callback) {
+ ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+ ViEEncoder* vie_encoder = cs.Encoder(video_channel);
+ vie_encoder->RegisterPreEncodeCallback(pre_encode_callback);
+}
+
+void ViEImageProcessImpl::DeRegisterPreEncodeCallback(int video_channel) {
+ ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+ ViEEncoder* vie_encoder = cs.Encoder(video_channel);
+ assert(vie_encoder != NULL);
+ vie_encoder->DeRegisterPreEncodeCallback();
+}
+
+void ViEImageProcessImpl::RegisterPreRenderCallback(
+ int video_channel,
+ I420FrameCallback* pre_render_callback) {
+ ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+ ViEChannel* vie_channel = cs.Channel(video_channel);
+ assert(vie_channel != NULL);
+ vie_channel->RegisterPreRenderCallback(pre_render_callback);
+}
+
+void ViEImageProcessImpl::DeRegisterPreRenderCallback(int video_channel) {
+ ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+ ViEChannel* vie_channel = cs.Channel(video_channel);
+ assert(vie_channel != NULL);
+ vie_channel->RegisterPreRenderCallback(NULL);
+}
+
} // namespace webrtc
diff --git a/video_engine/vie_image_process_impl.h b/video_engine/vie_image_process_impl.h
index 81e6b78..9b43e7e 100644
--- a/video_engine/vie_image_process_impl.h
+++ b/video_engine/vie_image_process_impl.h
@@ -38,6 +38,15 @@
virtual int EnableDenoising(const int capture_id, const bool enable);
virtual int EnableColorEnhancement(const int video_channel,
const bool enable);
+ virtual void RegisterPreEncodeCallback(
+ int video_channel,
+ I420FrameCallback* pre_encode_callback) OVERRIDE;
+ virtual void DeRegisterPreEncodeCallback(int video_channel) OVERRIDE;
+
+ virtual void RegisterPreRenderCallback(
+ int video_channel,
+ I420FrameCallback* pre_render_callback) OVERRIDE;
+ virtual void DeRegisterPreRenderCallback(int video_channel) OVERRIDE;
protected:
explicit ViEImageProcessImpl(ViESharedData* shared_data);
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index 49a7442..9e616a3 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -121,6 +121,8 @@
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SendPacket(channel=%d, len=%d)", channel, len);
+ CriticalSectionScoped cs(&_callbackCritSect);
+
if (_transportPtr == NULL)
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
@@ -158,8 +160,6 @@
// SRTP or External encryption
if (_encrypting)
{
- CriticalSectionScoped cs(&_callbackCritSect);
-
if (_encryptionPtr)
{
if (!_encryptionRTPBufferPtr)
@@ -192,39 +192,18 @@
}
}
- // Packet transmission using WebRtc socket transport
- if (!_externalTransport)
- {
- int n = _transportPtr->SendPacket(channel, bufferToSendPtr,
- bufferLength);
- if (n < 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::SendPacket() RTP transmission using WebRtc"
- " sockets failed");
- return -1;
- }
- return n;
+ int n = _transportPtr->SendPacket(channel, bufferToSendPtr,
+ bufferLength);
+ if (n < 0) {
+ std::string transport_name =
+ _externalTransport ? "external transport" : "WebRtc sockets";
+ WEBRTC_TRACE(kTraceError, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "Channel::SendPacket() RTP transmission using %s failed",
+ transport_name.c_str());
+ return -1;
}
-
- // Packet transmission using external transport transport
- {
- CriticalSectionScoped cs(&_callbackCritSect);
-
- int n = _transportPtr->SendPacket(channel,
- bufferToSendPtr,
- bufferLength);
- if (n < 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::SendPacket() RTP transmission using external"
- " transport failed");
- return -1;
- }
- return n;
- }
+ return n;
}
int
@@ -236,16 +215,14 @@
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SendRTCPPacket(channel=%d, len=%d)", channel, len);
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (_transportPtr == NULL)
{
- CriticalSectionScoped cs(&_callbackCritSect);
- if (_transportPtr == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::SendRTCPPacket() failed to send RTCP packet"
- " due to invalid transport object");
- return -1;
- }
+ WEBRTC_TRACE(kTraceError, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "Channel::SendRTCPPacket() failed to send RTCP packet"
+ " due to invalid transport object");
+ return -1;
}
uint8_t* bufferToSendPtr = (uint8_t*)data;
@@ -262,8 +239,6 @@
// SRTP or External encryption
if (_encrypting)
{
- CriticalSectionScoped cs(&_callbackCritSect);
-
if (_encryptionPtr)
{
if (!_encryptionRTCPBufferPtr)
@@ -294,45 +269,19 @@
}
}
- // Packet transmission using WebRtc socket transport
- if (!_externalTransport)
- {
- int n = _transportPtr->SendRTCPPacket(channel,
- bufferToSendPtr,
- bufferLength);
- if (n < 0)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::SendRTCPPacket() transmission using WebRtc"
- " sockets failed");
- return -1;
- }
- return n;
+ int n = _transportPtr->SendRTCPPacket(channel,
+ bufferToSendPtr,
+ bufferLength);
+ if (n < 0) {
+ std::string transport_name =
+ _externalTransport ? "external transport" : "WebRtc sockets";
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "Channel::SendRTCPPacket() transmission using %s failed",
+ transport_name.c_str());
+ return -1;
}
-
- // Packet transmission using external transport transport
- {
- CriticalSectionScoped cs(&_callbackCritSect);
- if (_transportPtr == NULL)
- {
- return -1;
- }
- int n = _transportPtr->SendRTCPPacket(channel,
- bufferToSendPtr,
- bufferLength);
- if (n < 0)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceVoice,
- VoEId(_instanceId,_channelId),
- "Channel::SendRTCPPacket() transmission using external"
- " transport failed");
- return -1;
- }
- return n;
- }
-
- return len;
+ return n;
}
void
@@ -703,16 +652,26 @@
ApmProcessRx(audioFrame);
}
- // Output volume scaling
- if (_outputGain < 0.99f || _outputGain > 1.01f)
+ float output_gain = 1.0f;
+ float left_pan = 1.0f;
+ float right_pan = 1.0f;
{
- AudioFrameOperations::ScaleWithSat(_outputGain, audioFrame);
+ CriticalSectionScoped cs(&volume_settings_critsect_);
+ output_gain = _outputGain;
+ left_pan = _panLeft;
+ right_pan = _panRight;
+ }
+
+ // Output volume scaling
+ if (output_gain < 0.99f || output_gain > 1.01f)
+ {
+ AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
}
// Scale left and/or right channel(s) if stereo and master balance is
// active
- if (_panLeft != 1.0f || _panRight != 1.0f)
+ if (left_pan != 1.0f || right_pan != 1.0f)
{
if (audioFrame.num_channels_ == 1)
{
@@ -725,7 +684,7 @@
// Do the panning operation (the audio frame contains stereo at this
// stage)
- AudioFrameOperations::Scale(_panLeft, _panRight, audioFrame);
+ AudioFrameOperations::Scale(left_pan, right_pan, audioFrame);
}
// Mix decoded PCM output with file if file mixing is enabled
@@ -905,6 +864,7 @@
const Config& config) :
_fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
_callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ volume_settings_critsect_(*CriticalSectionWrapper::CreateCriticalSection()),
_instanceId(instanceId),
_channelId(channelId),
rtp_header_parser_(RtpHeaderParser::Create()),
@@ -959,7 +919,6 @@
_callbackCritSectPtr(NULL),
_transportPtr(NULL),
_encryptionPtr(NULL),
- rtp_audioproc_(NULL),
rx_audioproc_(AudioProcessing::Create(VoEModuleId(instanceId, channelId))),
_rxVadObserverPtr(NULL),
_oldVadDecision(-1),
@@ -1103,6 +1062,7 @@
delete [] _decryptionRTCPBufferPtr;
delete &_callbackCritSect;
delete &_fileCritSect;
+ delete &volume_settings_critsect_;
}
int32_t
@@ -2957,6 +2917,7 @@
int
Channel::SetMute(bool enable)
{
+ CriticalSectionScoped cs(&volume_settings_critsect_);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetMute(enable=%d)", enable);
_mute = enable;
@@ -2966,12 +2927,14 @@
bool
Channel::Mute() const
{
+ CriticalSectionScoped cs(&volume_settings_critsect_);
return _mute;
}
int
Channel::SetOutputVolumePan(float left, float right)
{
+ CriticalSectionScoped cs(&volume_settings_critsect_);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetOutputVolumePan()");
_panLeft = left;
@@ -2982,6 +2945,7 @@
int
Channel::GetOutputVolumePan(float& left, float& right) const
{
+ CriticalSectionScoped cs(&volume_settings_critsect_);
left = _panLeft;
right = _panRight;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
@@ -2993,6 +2957,7 @@
int
Channel::SetChannelOutputVolumeScaling(float scaling)
{
+ CriticalSectionScoped cs(&volume_settings_critsect_);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
"Channel::SetChannelOutputVolumeScaling()");
_outputGain = scaling;
@@ -3002,6 +2967,7 @@
int
Channel::GetChannelOutputVolumeScaling(float& scaling) const
{
+ CriticalSectionScoped cs(&volume_settings_critsect_);
scaling = _outputGain;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
VoEId(_instanceId,_channelId),
@@ -4397,7 +4363,7 @@
MixOrReplaceAudioWithFile(mixingFrequency);
}
- if (_mute)
+ if (Mute())
{
AudioFrameOperations::Mute(_audioFrame);
}
@@ -5053,6 +5019,7 @@
int32_t
Channel::SendPacketRaw(const void *data, int len, bool RTCP)
{
+ CriticalSectionScoped cs(&_callbackCritSect);
if (_transportPtr == NULL)
{
return -1;
diff --git a/voice_engine/channel.h b/voice_engine/channel.h
index 2e5ac53..39d36a8 100644
--- a/voice_engine/channel.h
+++ b/voice_engine/channel.h
@@ -447,6 +447,7 @@
CriticalSectionWrapper& _fileCritSect;
CriticalSectionWrapper& _callbackCritSect;
+ CriticalSectionWrapper& volume_settings_critsect_;
uint32_t _instanceId;
int32_t _channelId;
diff --git a/voice_engine/shared_data.cc b/voice_engine/shared_data.cc
index 2d485ae..2187844 100644
--- a/voice_engine/shared_data.cc
+++ b/voice_engine/shared_data.cc
@@ -29,7 +29,6 @@
_channelManager(_gInstanceCounter, config),
_engineStatistics(_gInstanceCounter),
_audioDevicePtr(NULL),
- audioproc_(NULL),
_moduleProcessThreadPtr(ProcessThread::CreateProcessThread()),
_externalRecording(false),
_externalPlayout(false)
@@ -76,9 +75,9 @@
_outputMixerPtr->SetAudioProcessingModule(audioproc);
}
-uint16_t SharedData::NumOfSendingChannels() {
+int SharedData::NumOfSendingChannels() {
ChannelManager::Iterator it(&_channelManager);
- uint16_t sending_channels = 0;
+ int sending_channels = 0;
for (ChannelManager::Iterator it(&_channelManager); it.IsValid();
it.Increment()) {
@@ -89,6 +88,19 @@
return sending_channels;
}
+int SharedData::NumOfPlayingChannels() {
+ ChannelManager::Iterator it(&_channelManager);
+ int playout_channels = 0;
+
+ for (ChannelManager::Iterator it(&_channelManager); it.IsValid();
+ it.Increment()) {
+ if (it.GetChannel()->Playing())
+ ++playout_channels;
+ }
+
+ return playout_channels;
+}
+
void SharedData::SetLastError(int32_t error) const {
_engineStatistics.SetLastError(error);
}
diff --git a/voice_engine/shared_data.h b/voice_engine/shared_data.h
index 7c7ad5c..dd76e96 100644
--- a/voice_engine/shared_data.h
+++ b/voice_engine/shared_data.h
@@ -56,7 +56,8 @@
_audioDeviceLayer = layer;
}
- uint16_t NumOfSendingChannels();
+ int NumOfSendingChannels();
+ int NumOfPlayingChannels();
// Convenience methods for calling statistics().SetLastError().
void SetLastError(int32_t error) const;
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index ab78f77..c76e06d 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -999,7 +999,7 @@
VoEId(_shared->instance_id(), -1),
"VoEBaseImpl::StopPlayout()");
// Stop audio-device playing if no channel is playing out
- if (_shared->NumOfSendingChannels() == 0) {
+ if (_shared->NumOfPlayingChannels() == 0) {
if (_shared->audio_device()->StopPlayout() != 0) {
_shared->SetLastError(VE_CANNOT_STOP_PLAYOUT,
kTraceError,