Update stable to r5300.
git-svn-id: http://webrtc.googlecode.com/svn/stable/webrtc@5301 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/common_types.h b/common_types.h
index bcdb5e8..3d47b86 100644
--- a/common_types.h
+++ b/common_types.h
@@ -69,27 +69,28 @@
enum TraceModule
{
- kTraceUndefined = 0,
+ kTraceUndefined = 0,
// not a module, triggered from the engine code
- kTraceVoice = 0x0001,
+ kTraceVoice = 0x0001,
// not a module, triggered from the engine code
- kTraceVideo = 0x0002,
+ kTraceVideo = 0x0002,
// not a module, triggered from the utility code
- kTraceUtility = 0x0003,
- kTraceRtpRtcp = 0x0004,
- kTraceTransport = 0x0005,
- kTraceSrtp = 0x0006,
- kTraceAudioCoding = 0x0007,
- kTraceAudioMixerServer = 0x0008,
- kTraceAudioMixerClient = 0x0009,
- kTraceFile = 0x000a,
- kTraceAudioProcessing = 0x000b,
- kTraceVideoCoding = 0x0010,
- kTraceVideoMixer = 0x0011,
- kTraceAudioDevice = 0x0012,
- kTraceVideoRenderer = 0x0014,
- kTraceVideoCapture = 0x0015,
- kTraceVideoPreocessing = 0x0016
+ kTraceUtility = 0x0003,
+ kTraceRtpRtcp = 0x0004,
+ kTraceTransport = 0x0005,
+ kTraceSrtp = 0x0006,
+ kTraceAudioCoding = 0x0007,
+ kTraceAudioMixerServer = 0x0008,
+ kTraceAudioMixerClient = 0x0009,
+ kTraceFile = 0x000a,
+ kTraceAudioProcessing = 0x000b,
+ kTraceVideoCoding = 0x0010,
+ kTraceVideoMixer = 0x0011,
+ kTraceAudioDevice = 0x0012,
+ kTraceVideoRenderer = 0x0014,
+ kTraceVideoCapture = 0x0015,
+ kTraceVideoPreocessing = 0x0016,
+ kTraceRemoteBitrateEstimator = 0x0017,
};
enum TraceLevel
diff --git a/modules/audio_processing/utility/delay_estimator.c b/modules/audio_processing/utility/delay_estimator.c
index 062874d..6d6e9bc 100644
--- a/modules/audio_processing/utility/delay_estimator.c
+++ b/modules/audio_processing/utility/delay_estimator.c
@@ -23,6 +23,22 @@
static const int32_t kProbabilityLowerLimit = 8704; // 17 in Q9.
static const int32_t kProbabilityMinSpread = 2816; // 5.5 in Q9.
+// Robust validation settings
+static const float kHistogramMax = 3000.f;
+static const float kLastHistogramMax = 250.f;
+static const float kMinHistogramThreshold = 1.5f;
+static const int kMinRequiredHits = 10;
+static const int kMaxHitsWhenPossiblyNonCausal = 10;
+static const int kMaxHitsWhenPossiblyCausal = 1000;
+// TODO(bjornv): Make kMaxDelayDifference a configurable parameter, since it
+// corresponds to the filter length if the delay estimation is used in echo
+// control.
+static const int kMaxDelayDifference = 32;
+static const float kQ14Scaling = 1.f / (1 << 14); // Scaling by 2^14 to get Q0.
+static const float kFractionSlope = 0.05f;
+static const float kMinFractionWhenPossiblyCausal = 0.5f;
+static const float kMinFractionWhenPossiblyNonCausal = 0.25f;
+
// Counts and returns number of bits of a 32-bit word.
static int BitCount(uint32_t u32) {
uint32_t tmp = u32 - ((u32 >> 1) & 033333333333) -
@@ -59,6 +75,189 @@
}
}
+// Collects necessary statistics for the HistogramBasedValidation(). This
+// function has to be called prior to calling HistogramBasedValidation(). The
+// statistics updated and used by the HistogramBasedValidation() are:
+// 1. the number of |candidate_hits|, which tracks how long we have had the
+// same |candidate_delay|
+// 2. the |histogram| of candidate delays over time. This histogram is
+// weighted with respect to a reliability measure and time-varying to cope
+// with possible delay shifts.
+// For further description see commented code.
+//
+// Inputs:
+// - candidate_delay : The delay to validate.
+// - valley_depth_q14 : The cost function has a valley/minimum at the
+// |candidate_delay| location. |valley_depth_q14| is the
+// cost function difference between the minimum and
+// maximum locations. The value is in the Q14 domain.
+// - valley_level_q14 : Is the cost function value at the minimum, in Q14.
+static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self,
+ int candidate_delay,
+ int32_t valley_depth_q14,
+ int32_t valley_level_q14) {
+ const float valley_depth = valley_depth_q14 * kQ14Scaling;
+ float decrease_in_last_set = valley_depth;
+ const int max_hits_for_slow_change = (candidate_delay < self->last_delay) ?
+ kMaxHitsWhenPossiblyNonCausal : kMaxHitsWhenPossiblyCausal;
+ int i = 0;
+
+ // Reset |candidate_hits| if we have a new candidate.
+ if (candidate_delay != self->last_candidate_delay) {
+ self->candidate_hits = 0;
+ self->last_candidate_delay = candidate_delay;
+ }
+ self->candidate_hits++;
+
+ // The |histogram| is updated differently across the bins.
+ // 1. The |candidate_delay| histogram bin is increased with the
+ // |valley_depth|, which is a simple measure of how reliable the
+ // |candidate_delay| is. The histogram is not increased above
+ // |kHistogramMax|.
+ self->histogram[candidate_delay] += valley_depth;
+ if (self->histogram[candidate_delay] > kHistogramMax) {
+ self->histogram[candidate_delay] = kHistogramMax;
+ }
+ // 2. The histogram bins in the neighborhood of |candidate_delay| are
+ // unaffected. The neighborhood is defined as x + {-2, -1, 0, 1}.
+ // 3. The histogram bins in the neighborhood of |last_delay| are decreased
+ // with |decrease_in_last_set|. This value equals the difference between
+ // the cost function values at the locations |candidate_delay| and
+ // |last_delay| until we reach |max_hits_for_slow_change| consecutive hits
+ // at the |candidate_delay|. If we exceed this number of hits, the
+ // |candidate_delay| is a "potential" candidate and we start decreasing
+ // these histogram bins more rapidly with |valley_depth|.
+ if (self->candidate_hits < max_hits_for_slow_change) {
+ decrease_in_last_set = (self->mean_bit_counts[self->compare_delay] -
+ valley_level_q14) * kQ14Scaling;
+ }
+ // 4. All other bins are decreased with |valley_depth|.
+ // TODO(bjornv): Investigate how to make this loop more efficient. Split up
+ // the loop? Remove parts that don't add too much.
+ for (i = 0; i < self->farend->history_size; ++i) {
+ int is_in_last_set = (i >= self->last_delay - 2) &&
+ (i <= self->last_delay + 1) && (i != candidate_delay);
+ int is_in_candidate_set = (i >= candidate_delay - 2) &&
+ (i <= candidate_delay + 1);
+ self->histogram[i] -= decrease_in_last_set * is_in_last_set +
+ valley_depth * (!is_in_last_set && !is_in_candidate_set);
+ // 5. No histogram bin can go below 0.
+ if (self->histogram[i] < 0) {
+ self->histogram[i] = 0;
+ }
+ }
+}
+
+// Validates the |candidate_delay|, estimated in WebRtc_ProcessBinarySpectrum(),
+// based on a combination of counting concurring hits and a modified histogram
+// of recent delay estimates. In brief, a candidate is valid (returns 1) if it
+// is the most likely according to the histogram. There are a couple of
+// exceptions that are worth mentioning:
+// 1. If the |candidate_delay| < |last_delay| it can be that we are in a
+// non-causal state, breaking a possible echo control algorithm. Hence, we
+// open up for a quicker change by allowing the change even if the
+// |candidate_delay| is not the most likely one according to the histogram.
+// 2. There's a minimum number of hits (kMinRequiredHits) and the histogram
+// value has to reach a minimum (kMinHistogramThreshold) to be valid.
+// 3. The action also depends on the filter length used for echo control.
+// If the delay difference is larger than what the filter can capture, we
+// also move quicker towards a change.
+// For further description see commented code.
+//
+// Input:
+// - candidate_delay : The delay to validate.
+//
+// Return value:
+// - is_histogram_valid : 1 - The |candidate_delay| is valid.
+// 0 - Otherwise.
+static int HistogramBasedValidation(const BinaryDelayEstimator* self,
+ int candidate_delay) {
+ float fraction = 1.f;
+ float histogram_threshold = self->histogram[self->compare_delay];
+ const int delay_difference = candidate_delay - self->last_delay;
+ int is_histogram_valid = 0;
+
+ // The histogram based validation of |candidate_delay| is done by comparing
+ // the |histogram| at bin |candidate_delay| with a |histogram_threshold|.
+ // This |histogram_threshold| equals a |fraction| of the |histogram| at bin
+ // |last_delay|. The |fraction| is a piecewise linear function of the
+ // |delay_difference| between the |candidate_delay| and the |last_delay|
+ // allowing for a quicker move if
+ // i) a potential echo control filter cannot handle these large differences.
+ // ii) keeping |last_delay| instead of updating to |candidate_delay| could
+ // force an echo control into a non-causal state.
+ // We further require the histogram to have reached a minimum value of
+ // |kMinHistogramThreshold|. In addition, we also require the number of
+ // |candidate_hits| to be more than |kMinRequiredHits| to remove spurious
+ // values.
+
+ // Calculate a comparison histogram value (|histogram_threshold|) that
+ // depends on the distance between the |candidate_delay| and |last_delay|.
+ // TODO(bjornv): How much can we gain by turning the fraction calculation
+ // into tables?
+ if (delay_difference >= kMaxDelayDifference) {
+ fraction = 1.f - kFractionSlope * (delay_difference - kMaxDelayDifference);
+ fraction = (fraction > kMinFractionWhenPossiblyCausal ? fraction :
+ kMinFractionWhenPossiblyCausal);
+ } else if (delay_difference < 0) {
+ fraction = kMinFractionWhenPossiblyNonCausal -
+ kFractionSlope * delay_difference;
+ fraction = (fraction > 1.f ? 1.f : fraction);
+ }
+ histogram_threshold *= fraction;
+ histogram_threshold = (histogram_threshold > kMinHistogramThreshold ?
+ histogram_threshold : kMinHistogramThreshold);
+
+ is_histogram_valid =
+ (self->histogram[candidate_delay] >= histogram_threshold) &&
+ (self->candidate_hits > kMinRequiredHits);
+
+ return is_histogram_valid;
+}
+
+// Performs a robust validation of the |candidate_delay| estimated in
+// WebRtc_ProcessBinarySpectrum(). The algorithm takes the
+// |is_instantaneous_valid| and the |is_histogram_valid| and combines them
+// into a robust validation. The HistogramBasedValidation() has to be called
+// prior to this call.
+// For further description on how the combination is done, see commented code.
+//
+// Inputs:
+// - candidate_delay : The delay to validate.
+// - is_instantaneous_valid : The instantaneous validation performed in
+// WebRtc_ProcessBinarySpectrum().
+// - is_histogram_valid : The histogram based validation.
+//
+// Return value:
+// - is_robust : 1 - The candidate_delay is valid according to a
+// combination of the two inputs.
+// : 0 - Otherwise.
+static int RobustValidation(const BinaryDelayEstimator* self,
+ int candidate_delay,
+ int is_instantaneous_valid,
+ int is_histogram_valid) {
+ int is_robust = 0;
+
+ // The final robust validation is based on two algorithms: 1) the
+ // |is_instantaneous_valid| and 2) the histogram based with result stored in
+ // |is_histogram_valid|.
+ // i) Before we actually have a valid estimate (|last_delay| == -2), we say
+ // a candidate is valid if either algorithm states so
+ // (|is_instantaneous_valid| OR |is_histogram_valid|).
+ is_robust = (self->last_delay < 0) &&
+ (is_instantaneous_valid || is_histogram_valid);
+ // ii) Otherwise, we need both algorithms to be certain
+ // (|is_instantaneous_valid| AND |is_histogram_valid|)
+ is_robust |= is_instantaneous_valid && is_histogram_valid;
+ // iii) With one exception, i.e., the histogram based algorithm can overrule
+ // the instantaneous one if |is_histogram_valid| = 1 and the histogram
+ // is significantly strong.
+ is_robust |= is_histogram_valid &&
+ (self->histogram[candidate_delay] > self->last_delay_histogram);
+
+ return is_robust;
+}
+
void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
if (self == NULL) {
@@ -139,6 +338,9 @@
free(self->binary_near_history);
self->binary_near_history = NULL;
+ free(self->histogram);
+ self->histogram = NULL;
+
// BinaryDelayEstimator does not have ownership of |farend|, hence we do not
// free the memory here. That should be handled separately by the user.
self->farend = NULL;
@@ -161,8 +363,11 @@
self->farend = farend;
self->near_history_size = lookahead + 1;
- // Allocate memory for spectrum buffers.
- self->mean_bit_counts = malloc(farend->history_size * sizeof(int32_t));
+ // Allocate memory for spectrum buffers. The extra array element in
+ // |mean_bit_counts| and |histogram| is a dummy element only used while
+ // |last_delay| == -2, i.e., before we have a valid estimate.
+ self->mean_bit_counts =
+ malloc((farend->history_size + 1) * sizeof(int32_t));
malloc_fail |= (self->mean_bit_counts == NULL);
self->bit_counts = malloc(farend->history_size * sizeof(int32_t));
@@ -172,6 +377,9 @@
self->binary_near_history = malloc((lookahead + 1) * sizeof(uint32_t));
malloc_fail |= (self->binary_near_history == NULL);
+ self->histogram = malloc((farend->history_size + 1) * sizeof(float));
+ malloc_fail |= (self->histogram == NULL);
+
if (malloc_fail) {
WebRtc_FreeBinaryDelayEstimator(self);
self = NULL;
@@ -188,8 +396,9 @@
memset(self->bit_counts, 0, sizeof(int32_t) * self->farend->history_size);
memset(self->binary_near_history, 0,
sizeof(uint32_t) * self->near_history_size);
- for (i = 0; i < self->farend->history_size; ++i) {
+ for (i = 0; i <= self->farend->history_size; ++i) {
self->mean_bit_counts[i] = (20 << 9); // 20 in Q9.
+ self->histogram[i] = 0.f;
}
self->minimum_probability = (32 << 9); // 32 in Q9.
self->last_delay_probability = (32 << 9); // 32 in Q9.
@@ -198,6 +407,10 @@
self->last_delay = -2;
self->robust_validation_enabled = 0; // Disabled by default.
+ self->last_candidate_delay = -2;
+ self->compare_delay = self->farend->history_size;
+ self->candidate_hits = 0;
+ self->last_delay_histogram = 0.f;
}
int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
@@ -298,11 +511,32 @@
((value_best_candidate < self->minimum_probability) ||
(value_best_candidate < self->last_delay_probability)));
+ if (self->robust_validation_enabled) {
+ int is_histogram_valid = 0;
+ UpdateRobustValidationStatistics(self, candidate_delay, valley_depth,
+ value_best_candidate);
+ is_histogram_valid = HistogramBasedValidation(self, candidate_delay);
+ valid_candidate = RobustValidation(self, candidate_delay, valid_candidate,
+ is_histogram_valid);
+
+ }
if (valid_candidate) {
+ if (candidate_delay != self->last_delay) {
+ self->last_delay_histogram =
+ (self->histogram[candidate_delay] > kLastHistogramMax ?
+ kLastHistogramMax : self->histogram[candidate_delay]);
+ // Adjust the histogram if we made a change to |last_delay|, though it was
+ // not the most likely one according to the histogram.
+ if (self->histogram[candidate_delay] <
+ self->histogram[self->compare_delay]) {
+ self->histogram[self->compare_delay] = self->histogram[candidate_delay];
+ }
+ }
self->last_delay = candidate_delay;
if (value_best_candidate < self->last_delay_probability) {
self->last_delay_probability = value_best_candidate;
}
+ self->compare_delay = self->last_delay;
}
return self->last_delay;
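
To illustrate the histogram-based validation added above, the following standalone sketch reproduces the piecewise-linear |fraction| rule described in the comments of HistogramBasedValidation(). It reuses the constants introduced at the top of delay_estimator.c; HistogramFraction() and the sample values are illustrative only and not part of the patch.

#include <algorithm>
#include <cstdio>

namespace {

// Constants mirroring the ones added in delay_estimator.c.
const int kMaxDelayDifference = 32;
const float kFractionSlope = 0.05f;
const float kMinFractionWhenPossiblyCausal = 0.5f;
const float kMinFractionWhenPossiblyNonCausal = 0.25f;

// Piecewise-linear |fraction| of the histogram value at |last_delay| that the
// |candidate_delay| bin must reach, as a function of their difference.
float HistogramFraction(int delay_difference) {
  float fraction = 1.f;
  if (delay_difference >= kMaxDelayDifference) {
    // Candidate far ahead of |last_delay|: lower the bar gradually, but never
    // below 0.5, so moves beyond the echo control filter length happen sooner.
    fraction = 1.f - kFractionSlope * (delay_difference - kMaxDelayDifference);
    fraction = std::max(fraction, kMinFractionWhenPossiblyCausal);
  } else if (delay_difference < 0) {
    // Candidate smaller than |last_delay| (possibly non-causal): start near
    // 0.25 and grow with the magnitude of the negative difference, capped at 1.
    fraction = kMinFractionWhenPossiblyNonCausal -
        kFractionSlope * delay_difference;
    fraction = std::min(fraction, 1.f);
  }
  return fraction;
}

}  // namespace

int main() {
  // Same delay: full histogram value required. 40 bins ahead: 60%. One bin
  // behind (possibly non-causal): only 30%.
  std::printf("%.2f %.2f %.2f\n", HistogramFraction(0), HistogramFraction(40),
              HistogramFraction(-1));
  return 0;
}
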
diff --git a/modules/audio_processing/utility/delay_estimator.h b/modules/audio_processing/utility/delay_estimator.h
index 561514b..7ffb81b 100644
--- a/modules/audio_processing/utility/delay_estimator.h
+++ b/modules/audio_processing/utility/delay_estimator.h
@@ -44,6 +44,11 @@
// Robust validation
int robust_validation_enabled;
+ int last_candidate_delay;
+ int compare_delay;
+ int candidate_hits;
+ float* histogram;
+ float last_delay_histogram;
// Far-end binary spectrum history buffer etc.
BinaryDelayEstimatorFarend* farend;
diff --git a/modules/interface/module_common_types.h b/modules/interface/module_common_types.h
index 67c6cb4..2494d68 100644
--- a/modules/interface/module_common_types.h
+++ b/modules/interface/module_common_types.h
@@ -28,7 +28,9 @@
namespace webrtc {
struct RTPHeaderExtension {
+ bool hasTransmissionTimeOffset;
int32_t transmissionTimeOffset;
+ bool hasAbsoluteSendTime;
uint32_t absoluteSendTime;
};
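
The new flags let a header consumer distinguish "extension absent" from "extension present with value zero" (the transmission time offset MAY be omitted when it is zero). A minimal, hypothetical consumer of the struct might look like the sketch below; InspectExtension() and the printf output are illustrative and not part of WebRTC.

#include <cstdint>
#include <cstdio>

// Mirror of the updated struct, for illustration only.
struct RTPHeaderExtension {
  bool hasTransmissionTimeOffset;
  int32_t transmissionTimeOffset;
  bool hasAbsoluteSendTime;
  uint32_t absoluteSendTime;
};

// Hypothetical consumer: the has* flags make "value is zero" and "extension
// not present" distinguishable.
void InspectExtension(const RTPHeaderExtension& ext) {
  if (ext.hasTransmissionTimeOffset) {
    std::printf("TOffset present: %d\n", ext.transmissionTimeOffset);
  } else {
    std::printf("TOffset not present\n");
  }
  if (ext.hasAbsoluteSendTime) {
    std::printf("AbsSendTime present: %u\n", ext.absoluteSendTime);
  } else {
    std::printf("AbsSendTime not present\n");
  }
}

int main() {
  RTPHeaderExtension ext = {true, 0, false, 0};
  InspectExtension(ext);  // A zero offset is still reported as present.
  return 0;
}
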
diff --git a/modules/pacing/include/paced_sender.h b/modules/pacing/include/paced_sender.h
index 44e8144..0454690 100644
--- a/modules/pacing/include/paced_sender.h
+++ b/modules/pacing/include/paced_sender.h
@@ -92,7 +92,7 @@
// A negative queue size is interpreted as infinite.
virtual void set_max_queue_length_ms(int max_queue_length_ms);
- // Returns the time since the oldest queued packet was captured.
+ // Returns the time since the oldest queued packet was enqueued.
virtual int QueueInMs() const;
// Returns the number of milliseconds until the module want a worker thread
@@ -108,9 +108,7 @@
bool ShouldSendNextPacket(paced_sender::PacketList** packet_list);
// Local helper function to GetNextPacket.
- void GetNextPacketFromList(paced_sender::PacketList* packets,
- uint32_t* ssrc, uint16_t* sequence_number, int64_t* capture_time_ms,
- bool* retransmission);
+ paced_sender::Packet GetNextPacketFromList(paced_sender::PacketList* packets);
bool SendPacketFromList(paced_sender::PacketList* packet_list);
diff --git a/modules/pacing/paced_sender.cc b/modules/pacing/paced_sender.cc
index 7d4e81b..c46bd04 100644
--- a/modules/pacing/paced_sender.cc
+++ b/modules/pacing/paced_sender.cc
@@ -36,16 +36,18 @@
namespace paced_sender {
struct Packet {
Packet(uint32_t ssrc, uint16_t seq_number, int64_t capture_time_ms,
- int length_in_bytes, bool retransmission)
+ int64_t enqueue_time_ms, int length_in_bytes, bool retransmission)
: ssrc_(ssrc),
sequence_number_(seq_number),
capture_time_ms_(capture_time_ms),
+ enqueue_time_ms_(enqueue_time_ms),
bytes_(length_in_bytes),
retransmission_(retransmission) {
}
uint32_t ssrc_;
uint16_t sequence_number_;
int64_t capture_time_ms_;
+ int64_t enqueue_time_ms_;
int bytes_;
bool retransmission_;
};
@@ -201,8 +203,11 @@
packet_list = low_priority_packets_.get();
break;
}
- packet_list->push_back(paced_sender::Packet(ssrc, sequence_number,
- capture_time_ms, bytes,
+ packet_list->push_back(paced_sender::Packet(ssrc,
+ sequence_number,
+ capture_time_ms,
+ TickTime::MillisecondTimestamp(),
+ bytes,
retransmission));
return false;
}
@@ -215,23 +220,23 @@
int PacedSender::QueueInMs() const {
CriticalSectionScoped cs(critsect_.get());
int64_t now_ms = TickTime::MillisecondTimestamp();
- int64_t oldest_packet_capture_time = now_ms;
+ int64_t oldest_packet_enqueue_time = now_ms;
if (!high_priority_packets_->empty()) {
- oldest_packet_capture_time = std::min(
- oldest_packet_capture_time,
- high_priority_packets_->front().capture_time_ms_);
+ oldest_packet_enqueue_time = std::min(
+ oldest_packet_enqueue_time,
+ high_priority_packets_->front().enqueue_time_ms_);
}
if (!normal_priority_packets_->empty()) {
- oldest_packet_capture_time = std::min(
- oldest_packet_capture_time,
- normal_priority_packets_->front().capture_time_ms_);
+ oldest_packet_enqueue_time = std::min(
+ oldest_packet_enqueue_time,
+ normal_priority_packets_->front().enqueue_time_ms_);
}
if (!low_priority_packets_->empty()) {
- oldest_packet_capture_time = std::min(
- oldest_packet_capture_time,
- low_priority_packets_->front().capture_time_ms_);
+ oldest_packet_enqueue_time = std::min(
+ oldest_packet_enqueue_time,
+ low_priority_packets_->front().enqueue_time_ms_);
}
- return now_ms - oldest_packet_capture_time;
+ return now_ms - oldest_packet_enqueue_time;
}
int32_t PacedSender::TimeUntilNextProcess() {
@@ -286,17 +291,13 @@
// MUST have critsect_ when calling.
bool PacedSender::SendPacketFromList(paced_sender::PacketList* packet_list) {
- uint32_t ssrc;
- uint16_t sequence_number;
- int64_t capture_time_ms;
- bool retransmission;
- GetNextPacketFromList(packet_list, &ssrc, &sequence_number,
- &capture_time_ms, &retransmission);
+ paced_sender::Packet packet = GetNextPacketFromList(packet_list);
critsect_->Leave();
- const bool success = callback_->TimeToSendPacket(ssrc, sequence_number,
- capture_time_ms,
- retransmission);
+ const bool success = callback_->TimeToSendPacket(packet.ssrc_,
+ packet.sequence_number_,
+ packet.capture_time_ms_,
+ packet.retransmission_);
critsect_->Enter();
// If packet cannot be sent then keep it in packet list and exit early.
// There's no need to send more packets.
@@ -305,13 +306,14 @@
}
packet_list->pop_front();
const bool last_packet = packet_list->empty() ||
- packet_list->front().capture_time_ms_ > capture_time_ms;
+ packet_list->front().capture_time_ms_ > packet.capture_time_ms_;
if (packet_list != high_priority_packets_.get()) {
- if (capture_time_ms > capture_time_ms_last_sent_) {
- capture_time_ms_last_sent_ = capture_time_ms;
- } else if (capture_time_ms == capture_time_ms_last_sent_ &&
+ if (packet.capture_time_ms_ > capture_time_ms_last_sent_) {
+ capture_time_ms_last_sent_ = packet.capture_time_ms_;
+ } else if (packet.capture_time_ms_ == capture_time_ms_last_sent_ &&
last_packet) {
- TRACE_EVENT_ASYNC_END0("webrtc_rtp", "PacedSend", capture_time_ms);
+ TRACE_EVENT_ASYNC_END0("webrtc_rtp", "PacedSend",
+ packet.capture_time_ms_);
}
}
return true;
@@ -374,15 +376,11 @@
return false;
}
-void PacedSender::GetNextPacketFromList(paced_sender::PacketList* packets,
- uint32_t* ssrc, uint16_t* sequence_number, int64_t* capture_time_ms,
- bool* retransmission) {
+paced_sender::Packet PacedSender::GetNextPacketFromList(
+ paced_sender::PacketList* packets) {
paced_sender::Packet packet = packets->front();
UpdateMediaBytesSent(packet.bytes_);
- *sequence_number = packet.sequence_number_;
- *ssrc = packet.ssrc_;
- *capture_time_ms = packet.capture_time_ms_;
- *retransmission = packet.retransmission_;
+ return packet;
}
// MUST have critsect_ when calling.
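
With the enqueue timestamp stored per packet, QueueInMs() now reports how long the oldest packet has been waiting in the pacer rather than how old its capture time is. A simplified model of the new computation is sketched below; QueuedPacket and the plain std::list queues are stand-ins for paced_sender::Packet and the priority lists, and TickTime is replaced by an explicit now_ms argument.

#include <algorithm>
#include <cstdint>
#include <initializer_list>
#include <list>

// Simplified stand-in for paced_sender::Packet; only the fields relevant to
// the queue-time computation are kept.
struct QueuedPacket {
  int64_t capture_time_ms;
  int64_t enqueue_time_ms;
};

// Mirrors the new QueueInMs() logic: the queue age is measured from the
// enqueue time of the oldest packet across all priority queues.
int64_t QueueInMs(int64_t now_ms,
                  const std::list<QueuedPacket>& high,
                  const std::list<QueuedPacket>& normal,
                  const std::list<QueuedPacket>& low) {
  int64_t oldest_enqueue_time = now_ms;
  for (const std::list<QueuedPacket>* queue : {&high, &normal, &low}) {
    if (!queue->empty()) {
      oldest_enqueue_time =
          std::min(oldest_enqueue_time, queue->front().enqueue_time_ms);
    }
  }
  return now_ms - oldest_enqueue_time;  // 0 when all queues are empty.
}

int main() {
  std::list<QueuedPacket> high, normal, low;
  normal.push_back({/*capture_time_ms=*/1000, /*enqueue_time_ms=*/1500});
  // Packet enqueued at t=1500, queried at t=2000 -> 500 ms in queue.
  return QueueInMs(/*now_ms=*/2000, high, normal, low) == 500 ? 0 : 1;
}
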
diff --git a/modules/pacing/paced_sender_unittest.cc b/modules/pacing/paced_sender_unittest.cc
index 655c03d..f8dcdfc 100644
--- a/modules/pacing/paced_sender_unittest.cc
+++ b/modules/pacing/paced_sender_unittest.cc
@@ -368,8 +368,6 @@
uint32_t ssrc = 12346;
uint16_t sequence_number = 1234;
int64_t capture_time_ms = TickTime::MillisecondTimestamp();
- TickTime::AdvanceFakeClock(10000);
- int64_t second_capture_time_ms = TickTime::MillisecondTimestamp();
EXPECT_EQ(0, send_bucket_->QueueInMs());
@@ -384,10 +382,6 @@
send_bucket_->Pause();
- // Expect everything to be queued.
- EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kLowPriority,
- ssrc_low_priority, sequence_number++, second_capture_time_ms, 250,
- false));
EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kNormalPriority,
ssrc, sequence_number++, capture_time_ms, 250, false));
EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kNormalPriority,
@@ -395,6 +389,14 @@
EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kHighPriority,
ssrc, sequence_number++, capture_time_ms, 250, false));
+ TickTime::AdvanceFakeClock(10000);
+ int64_t second_capture_time_ms = TickTime::MillisecondTimestamp();
+
+ // Expect everything to be queued.
+ EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kLowPriority,
+ ssrc_low_priority, sequence_number++, second_capture_time_ms, 250,
+ false));
+
EXPECT_EQ(TickTime::MillisecondTimestamp() - capture_time_ms,
send_bucket_->QueueInMs());
@@ -441,13 +443,14 @@
capture_time_ms,
250,
false));
+ TickTime::AdvanceFakeClock(1);
EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kNormalPriority,
ssrc,
sequence_number + 1,
capture_time_ms + 1,
250,
false));
- TickTime::AdvanceFakeClock(10000);
+ TickTime::AdvanceFakeClock(9999);
EXPECT_EQ(TickTime::MillisecondTimestamp() - capture_time_ms,
send_bucket_->QueueInMs());
// Fails to send first packet so only one call.
@@ -516,5 +519,24 @@
TickTime::AdvanceFakeClock(31);
send_bucket_->Process();
}
+
+TEST_F(PacedSenderTest, QueueTimeGrowsOverTime) {
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ EXPECT_EQ(0, send_bucket_->QueueInMs());
+
+ send_bucket_->UpdateBitrate(30, 0, 0);
+ SendAndExpectPacket(PacedSender::kNormalPriority,
+ ssrc,
+ sequence_number,
+ TickTime::MillisecondTimestamp(),
+ 1200,
+ false);
+
+ TickTime::AdvanceFakeClock(500);
+ EXPECT_EQ(500, send_bucket_->QueueInMs());
+ send_bucket_->Process();
+ EXPECT_EQ(0, send_bucket_->QueueInMs());
+}
} // namespace test
} // namespace webrtc
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
index 69b35c5..a544ee5 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
@@ -17,6 +17,7 @@
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -225,6 +226,8 @@
RemoteBitrateObserver* observer,
Clock* clock,
uint32_t min_bitrate_bps) const {
+ WEBRTC_TRACE(kTraceStateInfo, kTraceRemoteBitrateEstimator, -1,
+ "RemoteBitrateEstimatorFactory: Instantiating.");
return new RemoteBitrateEstimatorSingleStream(observer, clock,
min_bitrate_bps);
}
@@ -233,6 +236,8 @@
RemoteBitrateObserver* observer,
Clock* clock,
uint32_t min_bitrate_bps) const {
+ WEBRTC_TRACE(kTraceStateInfo, kTraceRemoteBitrateEstimator, -1,
+ "AbsoluteSendTimeRemoteBitrateEstimatorFactory: Instantiating.");
return new RemoteBitrateEstimatorSingleStream(observer, clock,
min_bitrate_bps);
}
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index b476f2c..89e9eb2 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -1135,10 +1135,13 @@
id_,
"SendNACK(size:%u)", size);
- uint16_t avg_rtt = 0;
- rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &avg_rtt, NULL, NULL);
+ // Use RTT from RtcpRttStats class if provided.
+ uint16_t rtt = rtt_ms();
+ if (rtt == 0) {
+ rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &rtt, NULL, NULL);
+ }
- int64_t wait_time = 5 + ((avg_rtt * 3) >> 1); // 5 + RTT * 1.5.
+ int64_t wait_time = 5 + ((rtt * 3) >> 1); // 5 + RTT * 1.5.
if (wait_time == 5) {
wait_time = 100; // During startup we don't have an RTT.
}
@@ -1577,7 +1580,6 @@
RtpRtcp* module = *it;
if (module)
module->RegisterVideoBitrateObserver(observer);
- ++it;
}
return;
}
@@ -1622,9 +1624,12 @@
nack_sequence_numbers.size() == 0) {
return;
}
- uint16_t avg_rtt = 0;
- rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &avg_rtt, NULL, NULL);
- rtp_sender_.OnReceivedNACK(nack_sequence_numbers, avg_rtt);
+ // Use RTT from RtcpRttStats class if provided.
+ uint16_t rtt = rtt_ms();
+ if (rtt == 0) {
+ rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), NULL, &rtt, NULL, NULL);
+ }
+ rtp_sender_.OnReceivedNACK(nack_sequence_numbers, rtt);
}
int32_t ModuleRtpRtcpImpl::LastReceivedNTP(
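
Both call sites above now derive the RTT from the RtcpRttStats-provided value when available, and the NACK wait time follows wait = 5 + 1.5 * RTT ms with a 100 ms fallback while no RTT is known. A small worked example of that rule; NackWaitTimeMs() is a hypothetical helper, not a function in the module.

#include <cstdint>
#include <cstdio>

// Hypothetical helper reproducing the wait-time rule above:
// wait = 5 + 1.5 * RTT ms, with a 100 ms fallback while no RTT is known.
int64_t NackWaitTimeMs(uint16_t rtt_ms) {
  int64_t wait_time = 5 + ((rtt_ms * 3) >> 1);  // 5 + RTT * 1.5.
  if (wait_time == 5) {
    wait_time = 100;  // During startup we don't have an RTT.
  }
  return wait_time;
}

int main() {
  // No RTT yet -> 100 ms; RTT of 100 ms -> 5 + 150 = 155 ms.
  std::printf("%lld %lld\n",
              static_cast<long long>(NackWaitTimeMs(0)),
              static_cast<long long>(NackWaitTimeMs(100)));
  return 0;
}
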
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index 054e2f3..075770d 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -410,6 +410,7 @@
Clock* clock_;
private:
+ FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, Rtt);
FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, RttForReceiverOnly);
int64_t RtcpReportInterval();
void SetRtcpReceiverSsrcs(uint32_t main_ssrc);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 6248f49..50f7f2e 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -122,6 +122,13 @@
// No RTT from other ssrc.
EXPECT_EQ(-1,
rtp_rtcp_impl_->RTT(kSsrc + 1, &rtt, &avg_rtt, &min_rtt, &max_rtt));
+
+ // Verify RTT from rtt_stats config.
+ EXPECT_EQ(0U, rtt_stats_.LastProcessedRtt());
+ EXPECT_EQ(0U, rtp_rtcp_impl_->rtt_ms());
+ rtp_rtcp_impl_->Process();
+ EXPECT_EQ(100U, rtt_stats_.LastProcessedRtt());
+ EXPECT_EQ(100U, rtp_rtcp_impl_->rtt_ms());
}
TEST_F(RtpRtcpImplTest, SetRtcpXrRrtrStatus) {
@@ -147,7 +154,6 @@
// Verify RTT.
EXPECT_EQ(0U, rtt_stats_.LastProcessedRtt());
EXPECT_EQ(0U, rtp_rtcp_impl_->rtt_ms());
-
rtp_rtcp_impl_->Process();
EXPECT_EQ(100U, rtt_stats_.LastProcessedRtt());
EXPECT_EQ(100U, rtp_rtcp_impl_->rtt_ms());
diff --git a/modules/rtp_rtcp/source/rtp_utility.cc b/modules/rtp_rtcp/source/rtp_utility.cc
index f50b20a..102ebec 100644
--- a/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/modules/rtp_rtcp/source/rtp_utility.cc
@@ -391,9 +391,11 @@
// If in effect, MAY be omitted for those packets for which the offset
// is zero.
+ header.extension.hasTransmissionTimeOffset = false;
header.extension.transmissionTimeOffset = 0;
// May not be present in packet.
+ header.extension.hasAbsoluteSendTime = false;
header.extension.absoluteSendTime = 0;
if (X) {
@@ -490,6 +492,7 @@
// Negative offset, correct sign for Word24 to Word32.
header.extension.transmissionTimeOffset |= 0xFF000000;
}
+ header.extension.hasTransmissionTimeOffset = true;
break;
}
case kRtpExtensionAudioLevel: {
@@ -524,6 +527,7 @@
absoluteSendTime += *ptr++ << 8;
absoluteSendTime += *ptr++;
header.extension.absoluteSendTime = absoluteSendTime;
+ header.extension.hasAbsoluteSendTime = true;
break;
}
default: {
diff --git a/system_wrappers/source/trace_impl.cc b/system_wrappers/source/trace_impl.cc
index 4d30bca..8dbe76b 100644
--- a/system_wrappers/source/trace_impl.cc
+++ b/system_wrappers/source/trace_impl.cc
@@ -273,6 +273,10 @@
sprintf(trace_message, " VIDEO PROC:%5ld %5ld;", id_engine,
id_channel);
break;
+ case kTraceRemoteBitrateEstimator:
+ sprintf(trace_message, " BWE RBE:%5ld %5ld;", id_engine,
+ id_channel);
+ break;
}
} else {
switch (module) {
@@ -332,6 +336,9 @@
case kTraceVideoPreocessing:
sprintf(trace_message, " VIDEO PROC:%11ld;", idl);
break;
+ case kTraceRemoteBitrateEstimator:
+ sprintf(trace_message, " BWE RBE:%11ld;", idl);
+ break;
}
}
return kMessageLength;
diff --git a/video/bitrate_estimator_tests.cc b/video/bitrate_estimator_tests.cc
new file mode 100644
index 0000000..15bacd3
--- /dev/null
+++ b/video/bitrate_estimator_tests.cc
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include <functional>
+#include <list>
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/call.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/event_wrapper.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/test/direct_transport.h"
+#include "webrtc/test/fake_decoder.h"
+#include "webrtc/test/fake_encoder.h"
+#include "webrtc/test/frame_generator_capturer.h"
+
+namespace webrtc {
+
+static const int kTOFExtensionId = 4;
+static const int kASTExtensionId = 5;
+
+static unsigned int kDefaultTimeoutMs = 30 * 1000;
+static const uint32_t kSendSsrc = 0x654321;
+static const uint32_t kReceiverLocalSsrc = 0x123456;
+static const uint8_t kSendPayloadType = 125;
+
+class BitrateEstimatorTest : public ::testing::Test {
+ public:
+ BitrateEstimatorTest()
+ : receiver_trace_(),
+ send_transport_(),
+ receive_transport_(),
+ sender_call_(),
+ receiver_call_(),
+ send_config_(),
+ receive_config_(),
+ streams_() {
+ }
+
+ virtual ~BitrateEstimatorTest() {
+ EXPECT_TRUE(streams_.empty());
+ }
+
+ virtual void SetUp() {
+ // Create receiver call first so that we are guaranteed to have a trace
+ // callback when sender call is created.
+ Call::Config receiver_call_config(&receive_transport_);
+ receiver_call_config.trace_callback = &receiver_trace_;
+ receiver_call_.reset(Call::Create(receiver_call_config));
+
+ Call::Config sender_call_config(&send_transport_);
+ sender_call_.reset(Call::Create(sender_call_config));
+
+ send_transport_.SetReceiver(receiver_call_->Receiver());
+ receive_transport_.SetReceiver(sender_call_->Receiver());
+
+ send_config_ = sender_call_->GetDefaultSendConfig();
+ send_config_.rtp.ssrcs.push_back(kSendSsrc);
+ // send_config_.encoder will be set by every stream separately.
+ send_config_.internal_source = false;
+ test::FakeEncoder::SetCodecSettings(&send_config_.codec, 1);
+ send_config_.codec.plType = kSendPayloadType;
+
+ receive_config_ = receiver_call_->GetDefaultReceiveConfig();
+ receive_config_.codecs.clear();
+ receive_config_.codecs.push_back(send_config_.codec);
+ // receive_config_.external_decoders will be set by every stream separately.
+ receive_config_.rtp.remote_ssrc = send_config_.rtp.ssrcs[0];
+ receive_config_.rtp.local_ssrc = kReceiverLocalSsrc;
+ receive_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTOffset, kTOFExtensionId));
+ receive_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId));
+ }
+
+ virtual void TearDown() {
+ std::for_each(streams_.begin(), streams_.end(),
+ std::mem_fun(&Stream::StopSending));
+
+ send_transport_.StopSending();
+ receive_transport_.StopSending();
+
+ while (!streams_.empty()) {
+ delete streams_.back();
+ streams_.pop_back();
+ }
+
+ // The TraceCallback instance MUST outlive the Calls, so destroy them explicitly.
+ receiver_call_.reset();
+ }
+
+ protected:
+ friend class Stream;
+
+ class TraceObserver : public TraceCallback {
+ public:
+ TraceObserver()
+ : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ received_log_lines_(),
+ expected_log_lines_(),
+ done_(EventWrapper::Create()) {
+ }
+
+ void PushExpectedLogLine(const std::string& expected_log_line) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ expected_log_lines_.push_back(expected_log_line);
+ }
+
+ virtual void Print(TraceLevel level,
+ const char* message,
+ int length) OVERRIDE {
+ CriticalSectionScoped cs(crit_sect_.get());
+ if (!(level & kTraceStateInfo)) {
+ return;
+ }
+ std::string msg(message);
+ if (msg.find("BitrateEstimator") != std::string::npos) {
+ received_log_lines_.push_back(msg);
+ }
+ int num_popped = 0;
+ while (!received_log_lines_.empty() && !expected_log_lines_.empty()) {
+ std::string a = received_log_lines_.front();
+ std::string b = expected_log_lines_.front();
+ received_log_lines_.pop_front();
+ expected_log_lines_.pop_front();
+ num_popped++;
+ EXPECT_TRUE(a.find(b) != std::string::npos);
+ }
+ if (expected_log_lines_.size() <= 0) {
+ if (num_popped > 0) {
+ done_->Set();
+ }
+ return;
+ }
+ }
+
+ EventTypeWrapper Wait() { return done_->Wait(kDefaultTimeoutMs); }
+
+ private:
+ typedef std::list<std::string> Strings;
+ scoped_ptr<CriticalSectionWrapper> crit_sect_;
+ Strings received_log_lines_;
+ Strings expected_log_lines_;
+ scoped_ptr<EventWrapper> done_;
+ };
+
+ class Stream {
+ public:
+ explicit Stream(BitrateEstimatorTest* test)
+ : test_(test),
+ is_sending_receiving_(false),
+ send_stream_(NULL),
+ receive_stream_(NULL),
+ frame_generator_capturer_(),
+ fake_encoder_(Clock::GetRealTimeClock()),
+ fake_decoder_() {
+ test_->send_config_.rtp.ssrcs[0]++;
+ test_->send_config_.encoder = &fake_encoder_;
+ send_stream_ =
+ test_->sender_call_->CreateVideoSendStream(test_->send_config_);
+ frame_generator_capturer_.reset(
+ test::FrameGeneratorCapturer::Create(send_stream_->Input(),
+ test_->send_config_.codec.width,
+ test_->send_config_.codec.height,
+ 30,
+ Clock::GetRealTimeClock()));
+ send_stream_->StartSending();
+ frame_generator_capturer_->Start();
+
+ ExternalVideoDecoder decoder;
+ decoder.decoder = &fake_decoder_;
+ decoder.payload_type = test_->send_config_.codec.plType;
+ test_->receive_config_.rtp.remote_ssrc = test_->send_config_.rtp.ssrcs[0];
+ test_->receive_config_.rtp.local_ssrc++;
+ test_->receive_config_.external_decoders.push_back(decoder);
+ receive_stream_ = test_->receiver_call_->CreateVideoReceiveStream(
+ test_->receive_config_);
+ receive_stream_->StartReceiving();
+
+ is_sending_receiving_ = true;
+ }
+
+ ~Stream() {
+ frame_generator_capturer_.reset(NULL);
+ test_->sender_call_->DestroyVideoSendStream(send_stream_);
+ send_stream_ = NULL;
+ test_->receiver_call_->DestroyVideoReceiveStream(receive_stream_);
+ receive_stream_ = NULL;
+ }
+
+ void StopSending() {
+ if (is_sending_receiving_) {
+ frame_generator_capturer_->Stop();
+ send_stream_->StopSending();
+ receive_stream_->StopReceiving();
+ is_sending_receiving_ = false;
+ }
+ }
+
+ private:
+ BitrateEstimatorTest* test_;
+ bool is_sending_receiving_;
+ VideoSendStream* send_stream_;
+ VideoReceiveStream* receive_stream_;
+ scoped_ptr<test::FrameGeneratorCapturer> frame_generator_capturer_;
+ test::FakeEncoder fake_encoder_;
+ test::FakeDecoder fake_decoder_;
+ };
+
+ TraceObserver receiver_trace_;
+ test::DirectTransport send_transport_;
+ test::DirectTransport receive_transport_;
+ scoped_ptr<Call> sender_call_;
+ scoped_ptr<Call> receiver_call_;
+ VideoSendStream::Config send_config_;
+ VideoReceiveStream::Config receive_config_;
+ std::vector<Stream*> streams_;
+};
+
+TEST_F(BitrateEstimatorTest, InstantiatesTOFPerDefault) {
+ send_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTOffset, kTOFExtensionId));
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ streams_.push_back(new Stream(this));
+ EXPECT_EQ(kEventSignaled, receiver_trace_.Wait());
+}
+
+TEST_F(BitrateEstimatorTest, SwitchesToAST) {
+ send_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTOffset, kTOFExtensionId));
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ streams_.push_back(new Stream(this));
+ EXPECT_EQ(kEventSignaled, receiver_trace_.Wait());
+
+ send_config_.rtp.extensions[0] =
+ RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId);
+ receiver_trace_.PushExpectedLogLine("Switching to absolute send time RBE.");
+ receiver_trace_.PushExpectedLogLine(
+ "AbsoluteSendTimeRemoteBitrateEstimatorFactory: Instantiating.");
+ streams_.push_back(new Stream(this));
+ EXPECT_EQ(kEventSignaled, receiver_trace_.Wait());
+}
+
+TEST_F(BitrateEstimatorTest, SwitchesToASTThenBackToTOF) {
+ send_config_.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTOffset, kTOFExtensionId));
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ streams_.push_back(new Stream(this));
+ EXPECT_EQ(kEventSignaled, receiver_trace_.Wait());
+
+ send_config_.rtp.extensions[0] =
+ RtpExtension(RtpExtension::kAbsSendTime, kASTExtensionId);
+ receiver_trace_.PushExpectedLogLine("Switching to absolute send time RBE.");
+ receiver_trace_.PushExpectedLogLine(
+ "AbsoluteSendTimeRemoteBitrateEstimatorFactory: Instantiating.");
+ streams_.push_back(new Stream(this));
+ EXPECT_EQ(kEventSignaled, receiver_trace_.Wait());
+
+ send_config_.rtp.extensions[0] =
+ RtpExtension(RtpExtension::kTOffset, kTOFExtensionId);
+ receiver_trace_.PushExpectedLogLine(
+ "WrappingBitrateEstimator: Switching to transmission time offset RBE.");
+ receiver_trace_.PushExpectedLogLine(
+ "RemoteBitrateEstimatorFactory: Instantiating.");
+ streams_.push_back(new Stream(this));
+ streams_[0]->StopSending();
+ streams_[1]->StopSending();
+ EXPECT_EQ(kEventSignaled, receiver_trace_.Wait());
+}
+} // namespace webrtc
diff --git a/video/call_tests.cc b/video/call_tests.cc
index 96ad832..1b0c874 100644
--- a/video/call_tests.cc
+++ b/video/call_tests.cc
@@ -49,7 +49,7 @@
receive_stream_(NULL),
fake_encoder_(Clock::GetRealTimeClock()) {}
- ~CallTest() {
+ virtual ~CallTest() {
EXPECT_EQ(NULL, send_stream_);
EXPECT_EQ(NULL, receive_stream_);
}
@@ -853,15 +853,15 @@
for (size_t i = 0; i < kNumStreams; ++i) {
frame_generators[i]->Stop();
- delete frame_generators[i];
sender_call->DestroyVideoSendStream(send_streams[i]);
receiver_call->DestroyVideoReceiveStream(receive_streams[i]);
+ delete frame_generators[i];
delete observers[i];
}
sender_transport.StopSending();
receiver_transport.StopSending();
-}
+};
TEST_F(CallTest, ObserversEncodedFrames) {
class EncodedFrameTestObserver : public EncodedFrameObserver {
diff --git a/video/rampup_tests.cc b/video/rampup_tests.cc
index 08676a2..0386bd0 100644
--- a/video/rampup_tests.cc
+++ b/video/rampup_tests.cc
@@ -36,7 +36,7 @@
namespace webrtc {
namespace {
- static const int kTOffsetExtensionId = 7;
+ static const int kAbsoluteSendTimeExtensionId = 7;
static const int kMaxPacketSize = 1500;
}
@@ -74,8 +74,8 @@
rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(config));
rtp_rtcp_->SetREMBStatus(true);
rtp_rtcp_->SetRTCPStatus(kRtcpNonCompound);
- rtp_parser_->RegisterRtpHeaderExtension(kRtpExtensionTransmissionTimeOffset,
- kTOffsetExtensionId);
+ rtp_parser_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId);
AbsoluteSendTimeRemoteBitrateEstimatorFactory rbe_factory;
const uint32_t kRemoteBitrateEstimatorMinBitrateBps = 30000;
remote_bitrate_estimator_.reset(
@@ -220,7 +220,7 @@
kRtxSsrcs + kNumberOfStreams);
}
send_config.rtp.extensions.push_back(
- RtpExtension(RtpExtension::kTOffset, kTOffsetExtensionId));
+ RtpExtension(RtpExtension::kAbsSendTime, kAbsoluteSendTimeExtensionId));
VideoSendStream* send_stream = call->CreateVideoSendStream(send_config);
diff --git a/video/video_receive_stream.cc b/video/video_receive_stream.cc
index 31c8524..8d4dfd6 100644
--- a/video/video_receive_stream.cc
+++ b/video/video_receive_stream.cc
@@ -63,6 +63,20 @@
rtp_rtcp_->SetLocalSSRC(channel_, config_.rtp.local_ssrc);
rtp_rtcp_->SetRembStatus(channel_, false, config_.rtp.remb);
+ for (size_t i = 0; i < config_.rtp.extensions.size(); ++i) {
+ const std::string& extension = config_.rtp.extensions[i].name;
+ int id = config_.rtp.extensions[i].id;
+ if (extension == RtpExtension::kTOffset) {
+ if (rtp_rtcp_->SetReceiveTimestampOffsetStatus(channel_, true, id) != 0)
+ abort();
+ } else if (extension == RtpExtension::kAbsSendTime) {
+ if (rtp_rtcp_->SetReceiveAbsoluteSendTimeStatus(channel_, true, id) != 0)
+ abort();
+ } else {
+ abort(); // Unsupported extension.
+ }
+ }
+
network_ = ViENetwork::GetInterface(video_engine);
assert(network_ != NULL);
diff --git a/video/video_send_stream.cc b/video/video_send_stream.cc
index 713cdb9..e18b346 100644
--- a/video/video_send_stream.cc
+++ b/video/video_send_stream.cc
@@ -10,8 +10,6 @@
#include "webrtc/video/video_send_stream.h"
-#include <string.h>
-
#include <string>
#include <vector>
diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc
index ae33e81..25f334f 100644
--- a/video/video_send_stream_tests.cc
+++ b/video/video_send_stream_tests.cc
@@ -190,7 +190,7 @@
frame_generator_capturer->Stop();
send_stream_->StopSending();
call->DestroyVideoSendStream(send_stream_);
-}
+};
TEST_F(VideoSendStreamTest, SendsSetSsrc) { SendsSetSsrcs(1, false); }
@@ -249,8 +249,11 @@
EXPECT_TRUE(
parser_->Parse(packet, static_cast<int>(length), &header));
- if (header.extension.absoluteSendTime > 0)
- observation_complete_->Set();
+ EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
+ EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
+ EXPECT_EQ(header.extension.transmissionTimeOffset, 0);
+ EXPECT_GT(header.extension.absoluteSendTime, 0u);
+ observation_complete_->Set();
return SEND_PACKET;
}
@@ -294,7 +297,10 @@
EXPECT_TRUE(
parser_->Parse(packet, static_cast<int>(length), &header));
+ EXPECT_TRUE(header.extension.hasTransmissionTimeOffset);
+ EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
EXPECT_GT(header.extension.transmissionTimeOffset, 0);
+ EXPECT_EQ(header.extension.absoluteSendTime, 0u);
observation_complete_->Set();
return SEND_PACKET;
diff --git a/video_engine/vie_channel.cc b/video_engine/vie_channel.cc
index 44b90f4..2305ea7 100644
--- a/video_engine/vie_channel.cc
+++ b/video_engine/vie_channel.cc
@@ -92,7 +92,6 @@
bandwidth_observer_(bandwidth_observer),
send_timestamp_extension_id_(kInvalidRtpExtensionId),
absolute_send_time_extension_id_(kInvalidRtpExtensionId),
- receive_absolute_send_time_enabled_(false),
external_transport_(NULL),
decoder_reset_(true),
wait_for_key_frame_(false),
@@ -934,14 +933,9 @@
}
int ViEChannel::SetReceiveAbsoluteSendTimeStatus(bool enable, int id) {
- receive_absolute_send_time_enabled_ = enable;
return vie_receiver_.SetReceiveAbsoluteSendTimeStatus(enable, id) ? 0 : -1;
}
-bool ViEChannel::GetReceiveAbsoluteSendTimeStatus() const {
- return receive_absolute_send_time_enabled_;
-}
-
void ViEChannel::SetRtcpXrRrtrStatus(bool enable) {
CriticalSectionScoped cs(rtp_rtcp_cs_.get());
rtp_rtcp_->SetRtcpXrRrtrStatus(enable);
diff --git a/video_engine/vie_channel.h b/video_engine/vie_channel.h
index de16731..33bf7bf 100644
--- a/video_engine/vie_channel.h
+++ b/video_engine/vie_channel.h
@@ -396,7 +396,6 @@
scoped_ptr<RtcpBandwidthObserver> bandwidth_observer_;
int send_timestamp_extension_id_;
int absolute_send_time_extension_id_;
- bool receive_absolute_send_time_enabled_;
bool using_packet_spread_;
Transport* external_transport_;
diff --git a/video_engine/vie_channel_group.cc b/video_engine/vie_channel_group.cc
index d90d7c2..f079a10 100644
--- a/video_engine/vie_channel_group.cc
+++ b/video_engine/vie_channel_group.cc
@@ -16,6 +16,7 @@
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
#include "webrtc/modules/utility/interface/process_thread.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/video_engine/call_stats.h"
#include "webrtc/video_engine/encoder_state_feedback.h"
#include "webrtc/video_engine/vie_channel.h"
@@ -25,18 +26,22 @@
namespace webrtc {
namespace {
+static const uint32_t kTimeOffsetSwitchThreshold = 30;
+
class WrappingBitrateEstimator : public RemoteBitrateEstimator {
public:
- WrappingBitrateEstimator(RemoteBitrateObserver* observer, Clock* clock,
- ProcessThread* process_thread)
+ WrappingBitrateEstimator(int engine_id, RemoteBitrateObserver* observer,
+ Clock* clock, ProcessThread* process_thread)
: observer_(observer),
clock_(clock),
process_thread_(process_thread),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ engine_id_(engine_id),
min_bitrate_bps_(30000),
rbe_(RemoteBitrateEstimatorFactory().Create(observer_, clock_,
min_bitrate_bps_)),
- receive_absolute_send_time_(false) {
+ using_absolute_send_time_(false),
+ packets_since_absolute_send_time_(0) {
assert(process_thread_ != NULL);
process_thread_->RegisterModule(rbe_.get());
}
@@ -44,29 +49,11 @@
process_thread_->DeRegisterModule(rbe_.get());
}
- void SetReceiveAbsoluteSendTimeStatus(bool enable) {
- CriticalSectionScoped cs(crit_sect_.get());
- if (enable == receive_absolute_send_time_) {
- return;
- }
-
- process_thread_->DeRegisterModule(rbe_.get());
- if (enable) {
- rbe_.reset(AbsoluteSendTimeRemoteBitrateEstimatorFactory().Create(
- observer_, clock_, min_bitrate_bps_));
- } else {
- rbe_.reset(RemoteBitrateEstimatorFactory().Create(observer_, clock_,
- min_bitrate_bps_));
- }
- process_thread_->RegisterModule(rbe_.get());
-
- receive_absolute_send_time_ = enable;
- }
-
virtual void IncomingPacket(int64_t arrival_time_ms,
int payload_size,
const RTPHeader& header) {
CriticalSectionScoped cs(crit_sect_.get());
+ PickEstimator(header);
rbe_->IncomingPacket(arrival_time_ms, payload_size, header);
}
@@ -97,25 +84,60 @@
}
private:
+ // Instantiate RBE for Time Offset or Absolute Send Time extensions.
+ void PickEstimator(const RTPHeader& header) {
+ if (header.extension.hasAbsoluteSendTime) {
+ // If we see AST in the header, switch RBE strategy immediately.
+ if (!using_absolute_send_time_) {
+ process_thread_->DeRegisterModule(rbe_.get());
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVideo, ViEId(engine_id_),
+ "WrappingBitrateEstimator: Switching to absolute send time RBE.");
+ rbe_.reset(AbsoluteSendTimeRemoteBitrateEstimatorFactory().Create(
+ observer_, clock_, min_bitrate_bps_));
+ process_thread_->RegisterModule(rbe_.get());
+ using_absolute_send_time_ = true;
+ }
+ packets_since_absolute_send_time_ = 0;
+ } else {
+ // When we don't see AST, wait for a few packets before going back to TOF.
+ if (using_absolute_send_time_) {
+ ++packets_since_absolute_send_time_;
+ if (packets_since_absolute_send_time_ >= kTimeOffsetSwitchThreshold) {
+ process_thread_->DeRegisterModule(rbe_.get());
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVideo, ViEId(engine_id_),
+ "WrappingBitrateEstimator: Switching to transmission time offset "
+ "RBE.");
+ rbe_.reset(RemoteBitrateEstimatorFactory().Create(observer_, clock_,
+ min_bitrate_bps_));
+ process_thread_->RegisterModule(rbe_.get());
+ using_absolute_send_time_ = false;
+ }
+ }
+ }
+ }
+
RemoteBitrateObserver* observer_;
Clock* clock_;
ProcessThread* process_thread_;
scoped_ptr<CriticalSectionWrapper> crit_sect_;
+ const int engine_id_;
const uint32_t min_bitrate_bps_;
scoped_ptr<RemoteBitrateEstimator> rbe_;
- bool receive_absolute_send_time_;
+ bool using_absolute_send_time_;
+ uint32_t packets_since_absolute_send_time_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WrappingBitrateEstimator);
};
} // namespace
-ChannelGroup::ChannelGroup(ProcessThread* process_thread,
+ChannelGroup::ChannelGroup(int engine_id, ProcessThread* process_thread,
const Config& config)
: remb_(new VieRemb()),
bitrate_controller_(BitrateController::CreateBitrateController(true)),
call_stats_(new CallStats()),
- remote_bitrate_estimator_(new WrappingBitrateEstimator(remb_.get(),
- Clock::GetRealTimeClock(), process_thread)),
+ remote_bitrate_estimator_(new WrappingBitrateEstimator(engine_id,
+ remb_.get(), Clock::GetRealTimeClock(),
+ process_thread)),
encoder_state_feedback_(new EncoderStateFeedback()),
process_thread_(process_thread) {
call_stats_->RegisterStatsObserver(remote_bitrate_estimator_.get());
@@ -186,9 +208,4 @@
}
return true;
}
-
-void ChannelGroup::SetReceiveAbsoluteSendTimeStatus(bool enable) {
- static_cast<WrappingBitrateEstimator*>(remote_bitrate_estimator_.get())->
- SetReceiveAbsoluteSendTimeStatus(enable);
-}
} // namespace webrtc
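
The switching policy in PickEstimator() is asymmetric: the wrapper moves to the absolute-send-time estimator as soon as an AST extension is seen, but only falls back to the time-offset estimator after kTimeOffsetSwitchThreshold (30) consecutive packets without it. The sketch below models just that hysteresis; EstimatorSwitch and its bool return value (standing in for the factory swap and process-thread re-registration) are illustrative only.

#include <cstdint>

// Stripped-down model of the hysteresis in PickEstimator(): switch to the
// absolute-send-time RBE immediately, switch back to the time-offset RBE only
// after kThreshold consecutive packets without the AST extension.
class EstimatorSwitch {
 public:
  // Returns true whenever the underlying estimator would be recreated.
  bool OnPacket(bool has_absolute_send_time) {
    if (has_absolute_send_time) {
      packets_since_ast_ = 0;
      if (!using_ast_) {
        using_ast_ = true;
        return true;  // Would switch to the AST-based factory.
      }
      return false;
    }
    if (using_ast_ && ++packets_since_ast_ >= kThreshold) {
      using_ast_ = false;
      return true;  // Would switch back to the time-offset factory.
    }
    return false;
  }

  bool using_ast() const { return using_ast_; }

 private:
  static const uint32_t kThreshold = 30;  // kTimeOffsetSwitchThreshold.
  bool using_ast_ = false;
  uint32_t packets_since_ast_ = 0;
};

int main() {
  EstimatorSwitch s;
  s.OnPacket(true);                                // Switches to AST at once.
  for (int i = 0; i < 29; ++i) s.OnPacket(false);  // Not yet back to TOF.
  bool switched_back = s.OnPacket(false);          // 30th miss: back to TOF.
  return (switched_back && !s.using_ast()) ? 0 : 1;
}
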
diff --git a/video_engine/vie_channel_group.h b/video_engine/vie_channel_group.h
index d46a30a..95a042e 100644
--- a/video_engine/vie_channel_group.h
+++ b/video_engine/vie_channel_group.h
@@ -31,7 +31,7 @@
// group are assumed to send/receive data to the same end-point.
class ChannelGroup {
public:
- ChannelGroup(ProcessThread* process_thread,
+ ChannelGroup(int engine_id, ProcessThread* process_thread,
const Config& config);
~ChannelGroup();
@@ -42,7 +42,6 @@
bool SetChannelRembStatus(int channel_id, bool sender, bool receiver,
ViEChannel* channel);
- void SetReceiveAbsoluteSendTimeStatus(bool enable);
BitrateController* GetBitrateController();
CallStats* GetCallStats();
diff --git a/video_engine/vie_channel_manager.cc b/video_engine/vie_channel_manager.cc
index 5fdbde5..b62e282 100644
--- a/video_engine/vie_channel_manager.cc
+++ b/video_engine/vie_channel_manager.cc
@@ -89,7 +89,7 @@
}
// Create a new channel group and add this channel.
- ChannelGroup* group = new ChannelGroup(module_process_thread_,
+ ChannelGroup* group = new ChannelGroup(engine_id_, module_process_thread_,
config_);
BitrateController* bitrate_controller = group->GetBitrateController();
ViEEncoder* vie_encoder = new ViEEncoder(engine_id_, new_channel_id,
@@ -366,35 +366,6 @@
return group->SetChannelRembStatus(channel_id, sender, receiver, channel);
}
-bool ViEChannelManager::SetReceiveAbsoluteSendTimeStatus(int channel_id,
- bool enable,
- int id) {
- CriticalSectionScoped cs(channel_id_critsect_);
- ViEChannel* channel = ViEChannelPtr(channel_id);
- if (!channel) {
- return false;
- }
- if (channel->SetReceiveAbsoluteSendTimeStatus(enable, id) != 0) {
- return false;
- }
-
- // Enable absolute send time extension on the group if at least one of the
- // channels use it.
- ChannelGroup* group = FindGroup(channel_id);
- assert(group);
- bool any_enabled = false;
- for (ChannelMap::const_iterator c_it = channel_map_.begin();
- c_it != channel_map_.end(); ++c_it) {
- if (group->HasChannel(c_it->first) &&
- c_it->second->GetReceiveAbsoluteSendTimeStatus()) {
- any_enabled = true;
- break;
- }
- }
- group->SetReceiveAbsoluteSendTimeStatus(any_enabled);
- return true;
-}
-
void ViEChannelManager::UpdateSsrcs(int channel_id,
const std::list<unsigned int>& ssrcs) {
CriticalSectionScoped cs(channel_id_critsect_);
diff --git a/video_engine/vie_channel_manager.h b/video_engine/vie_channel_manager.h
index 9776435..db9eb11 100644
--- a/video_engine/vie_channel_manager.h
+++ b/video_engine/vie_channel_manager.h
@@ -74,10 +74,6 @@
// Adds a channel to include when sending REMB.
bool SetRembStatus(int channel_id, bool sender, bool receiver);
- // Switches a channel and its associated group to use (or not) the absolute
- // send time header extension with |id|.
- bool SetReceiveAbsoluteSendTimeStatus(int channel_id, bool enable, int id);
-
// Updates the SSRCs for a channel. If one of the SSRCs already is registered,
// it will simply be ignored and no error is returned.
void UpdateSsrcs(int channel_id, const std::list<unsigned int>& ssrcs);
diff --git a/video_engine/vie_rtp_rtcp_impl.cc b/video_engine/vie_rtp_rtcp_impl.cc
index e07ab6c..2bd47be 100644
--- a/video_engine/vie_rtp_rtcp_impl.cc
+++ b/video_engine/vie_rtp_rtcp_impl.cc
@@ -796,8 +796,16 @@
ViEId(shared_data_->instance_id(), video_channel),
"ViERTP_RTCPImpl::SetReceiveAbsoluteSendTimeStatus(%d, %d, %d)",
video_channel, enable, id);
- if (!shared_data_->channel_manager()->SetReceiveAbsoluteSendTimeStatus(
- video_channel, enable, id)) {
+ ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+ ViEChannel* vie_channel = cs.Channel(video_channel);
+ if (!vie_channel) {
+ WEBRTC_TRACE(kTraceError, kTraceVideo,
+ ViEId(shared_data_->instance_id(), video_channel),
+ "%s: Channel %d doesn't exist", __FUNCTION__, video_channel);
+ shared_data_->SetLastError(kViERtpRtcpInvalidChannelId);
+ return -1;
+ }
+ if (vie_channel->SetReceiveAbsoluteSendTimeStatus(enable, id) != 0) {
shared_data_->SetLastError(kViERtpRtcpUnknownError);
return -1;
}
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index 4e1913d..1eb55af 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -906,6 +906,7 @@
_decryptionRTCPBufferPtr(NULL),
_timeStamp(0), // This is just an offset, RTP module will add it's own random offset
_sendTelephoneEventPayloadType(106),
+ jitter_buffer_playout_timestamp_(0),
playout_timestamp_rtp_(0),
playout_timestamp_rtcp_(0),
_numberOfDiscardedPackets(0),
@@ -4718,6 +4719,8 @@
}
}
+ jitter_buffer_playout_timestamp_ = playout_timestamp;
+
// Remove the playout delay.
playout_timestamp -= (delay_ms * (playout_frequency / 1000));
@@ -5071,10 +5074,10 @@
rtp_receive_frequency = 48000;
}
- // playout_timestamp_rtp_ updated in UpdatePlayoutTimestamp for every incoming
- // packet.
- uint32_t timestamp_diff_ms = (rtp_timestamp - playout_timestamp_rtp_) /
- (rtp_receive_frequency / 1000);
+ // |jitter_buffer_playout_timestamp_| updated in UpdatePlayoutTimestamp for
+ // every incoming packet.
+ uint32_t timestamp_diff_ms = (rtp_timestamp -
+ jitter_buffer_playout_timestamp_) / (rtp_receive_frequency / 1000);
uint16_t packet_delay_ms = (rtp_timestamp - _previousTimestamp) /
(rtp_receive_frequency / 1000);
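
As a unit check on the modified delay computation: RTP timestamp ticks divided by (sample rate / 1000) give milliseconds, so at 48 kHz a difference of 4800 ticks between |rtp_timestamp| and |jitter_buffer_playout_timestamp_| corresponds to 100 ms. A hypothetical helper making that explicit (not part of the patch):

#include <cstdint>
#include <cstdio>

// Hypothetical helper spelling out the unit conversion used above:
// RTP timestamp ticks / (sample rate / 1000) = milliseconds.
uint32_t TimestampDiffMs(uint32_t rtp_timestamp,
                         uint32_t jitter_buffer_playout_timestamp,
                         int rtp_receive_frequency_hz) {
  return (rtp_timestamp - jitter_buffer_playout_timestamp) /
         (rtp_receive_frequency_hz / 1000);
}

int main() {
  // 4800 ticks at 48 kHz -> 100 ms; 160 ticks at 16 kHz -> 10 ms.
  std::printf("%u %u\n",
              TimestampDiffMs(52800, 48000, 48000),
              TimestampDiffMs(1160, 1000, 16000));
  return 0;
}
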
diff --git a/voice_engine/channel.h b/voice_engine/channel.h
index da55e9d..f8b04fd 100644
--- a/voice_engine/channel.h
+++ b/voice_engine/channel.h
@@ -489,6 +489,9 @@
uint8_t* _decryptionRTCPBufferPtr;
uint32_t _timeStamp;
uint8_t _sendTelephoneEventPayloadType;
+
+ // Timestamp of the audio pulled from NetEq.
+ uint32_t jitter_buffer_playout_timestamp_;
uint32_t playout_timestamp_rtp_;
uint32_t playout_timestamp_rtcp_;
uint32_t playout_delay_ms_;
diff --git a/voice_engine/voe_neteq_stats_unittest.cc b/voice_engine/voe_neteq_stats_unittest.cc
index 66a9a69..4bb4d4a 100644
--- a/voice_engine/voe_neteq_stats_unittest.cc
+++ b/voice_engine/voe_neteq_stats_unittest.cc
@@ -11,17 +11,17 @@
#include "webrtc/voice_engine/include/voe_neteq_stats.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/main/source/audio_coding_module_impl.h"
#include "webrtc/modules/audio_device/include/fake_audio_device.h"
-#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_hardware.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
-#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
-#include "webrtc/modules/audio_coding/main/source/audio_coding_module_impl.h"
namespace webrtc {
namespace voe {
@@ -168,7 +168,7 @@
// Check if the statistics are initialized correctly. Before any call to ACM
// all fields have to be zero.
-TEST_F(VoENetEqStatsTest, InitializedToZero) {
+TEST_F(VoENetEqStatsTest, DISABLED_ON_ANDROID(InitializedToZero)) {
AudioDecodingCallStats stats;
ASSERT_EQ(0,
voe_neteq_stats_->GetDecodingCallStatistics(channel_acm1_, &stats));
@@ -191,7 +191,7 @@
// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
// should result in generating silence, check the associated field.
-TEST_F(VoENetEqStatsTest, SilenceGeneratorCalled) {
+TEST_F(VoENetEqStatsTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
AudioDecodingCallStats stats;
const int kInitialDelay = 100;
@@ -226,7 +226,7 @@
// Insert some packets and pull audio. Check statistics are valid. Then,
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
// correctly updated.
-TEST_F(VoENetEqStatsTest, NetEqCalls) {
+TEST_F(VoENetEqStatsTest, DISABLED_ON_ANDROID(NetEqCalls)) {
AudioDecodingCallStats stats;
const int kNumNormalCalls = 10;
diff --git a/webrtc_tests.gypi b/webrtc_tests.gypi
index 17ff23e..0d2b30e 100644
--- a/webrtc_tests.gypi
+++ b/webrtc_tests.gypi
@@ -33,6 +33,7 @@
'target_name': 'video_engine_tests',
'type': '<(gtest_target_type)',
'sources': [
+ 'video/bitrate_estimator_tests.cc',
'video/call_tests.cc',
'video/video_send_stream_tests.cc',
'test/common_unittest.cc',