| /* |
| * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
| * |
| * Use of this source code is governed by a BSD-style license |
| * that can be found in the LICENSE file in the root of the source |
| * tree. An additional intellectual property rights grant can be found |
| * in the file PATENTS. All contributing project authors may |
| * be found in the AUTHORS file in the root of the source tree. |
| */ |
| #include <algorithm> |
| #include <list> |
| #include <map> |
| #include <memory> |
| #include <sstream> |
| #include <string> |
| #include <vector> |
| |
| #include "webrtc/api/video_codecs/video_encoder.h" |
| #include "webrtc/call/call.h" |
| #include "webrtc/common_video/include/frame_callback.h" |
| #include "webrtc/logging/rtc_event_log/rtc_event_log.h" |
| #include "webrtc/media/base/fakevideorenderer.h" |
| #include "webrtc/media/base/mediaconstants.h" |
| #include "webrtc/media/engine/internalencoderfactory.h" |
| #include "webrtc/media/engine/simulcast_encoder_adapter.h" |
| #include "webrtc/media/engine/webrtcvideoencoderfactory.h" |
| #include "webrtc/modules/include/module_common_types.h" |
| #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h" |
| #include "webrtc/modules/rtp_rtcp/source/byte_io.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtp_format.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtp_utility.h" |
| #include "webrtc/modules/video_coding/codecs/h264/include/h264.h" |
| #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h" |
| #include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h" |
| #include "webrtc/modules/video_coding/include/video_coding_defines.h" |
| #include "webrtc/rtc_base/checks.h" |
| #include "webrtc/rtc_base/event.h" |
| #include "webrtc/rtc_base/file.h" |
| #include "webrtc/rtc_base/optional.h" |
| #include "webrtc/rtc_base/ptr_util.h" |
| #include "webrtc/rtc_base/random.h" |
| #include "webrtc/rtc_base/rate_limiter.h" |
| #include "webrtc/system_wrappers/include/metrics.h" |
| #include "webrtc/system_wrappers/include/metrics_default.h" |
| #include "webrtc/system_wrappers/include/sleep.h" |
| #include "webrtc/test/call_test.h" |
| #include "webrtc/test/direct_transport.h" |
| #include "webrtc/test/encoder_settings.h" |
| #include "webrtc/test/fake_decoder.h" |
| #include "webrtc/test/fake_encoder.h" |
| #include "webrtc/test/field_trial.h" |
| #include "webrtc/test/frame_generator.h" |
| #include "webrtc/test/frame_generator_capturer.h" |
| #include "webrtc/test/gmock.h" |
| #include "webrtc/test/gtest.h" |
| #include "webrtc/test/null_transport.h" |
| #include "webrtc/test/rtcp_packet_parser.h" |
| #include "webrtc/test/rtp_rtcp_observer.h" |
| #include "webrtc/test/testsupport/fileutils.h" |
| #include "webrtc/test/testsupport/perf_test.h" |
| #include "webrtc/video/transport_adapter.h" |
| |
| // Flaky under MemorySanitizer: bugs.webrtc.org/7419 |
| #if defined(MEMORY_SANITIZER) |
| #define MAYBE_InitialProbing DISABLED_InitialProbing |
| // Fails on iOS bots: bugs.webrtc.org/7851 |
| #elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR |
| #define MAYBE_InitialProbing DISABLED_InitialProbing |
| #else |
| #define MAYBE_InitialProbing InitialProbing |
| #endif |
| |
| namespace webrtc { |
| |
namespace {
// NOTE(review): not referenced in this part of the file; the name suggests a
// wait bound for tests expecting media to stop flowing — confirm with the
// tests that use it further down.
constexpr int kSilenceTimeoutMs = 2000;
}  // namespace
| |
| class EndToEndTest : public test::CallTest { |
| public: |
| EndToEndTest() {} |
| |
| virtual ~EndToEndTest() { |
| EXPECT_EQ(nullptr, video_send_stream_); |
| EXPECT_TRUE(video_receive_streams_.empty()); |
| } |
| |
| protected: |
| class UnusedTransport : public Transport { |
| private: |
| bool SendRtp(const uint8_t* packet, |
| size_t length, |
| const PacketOptions& options) override { |
| ADD_FAILURE() << "Unexpected RTP sent."; |
| return false; |
| } |
| |
| bool SendRtcp(const uint8_t* packet, size_t length) override { |
| ADD_FAILURE() << "Unexpected RTCP sent."; |
| return false; |
| } |
| }; |
| |
| class RequiredTransport : public Transport { |
| public: |
| RequiredTransport(bool rtp_required, bool rtcp_required) |
| : need_rtp_(rtp_required), need_rtcp_(rtcp_required) {} |
| ~RequiredTransport() { |
| if (need_rtp_) { |
| ADD_FAILURE() << "Expected RTP packet not sent."; |
| } |
| if (need_rtcp_) { |
| ADD_FAILURE() << "Expected RTCP packet not sent."; |
| } |
| } |
| |
| private: |
| bool SendRtp(const uint8_t* packet, |
| size_t length, |
| const PacketOptions& options) override { |
| rtc::CritScope lock(&crit_); |
| need_rtp_ = false; |
| return true; |
| } |
| |
| bool SendRtcp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock(&crit_); |
| need_rtcp_ = false; |
| return true; |
| } |
| bool need_rtp_; |
| bool need_rtcp_; |
| rtc::CriticalSection crit_; |
| }; |
| |
| void DecodesRetransmittedFrame(bool enable_rtx, bool enable_red); |
| void ReceivesPliAndRecovers(int rtp_history_ms); |
| void RespectsRtcpMode(RtcpMode rtcp_mode); |
| void TestSendsSetSsrcs(size_t num_ssrcs, bool send_single_ssrc_first); |
| void TestRtpStatePreservation(bool use_rtx, bool provoke_rtcpsr_before_rtp); |
| void VerifyHistogramStats(bool use_rtx, bool use_red, bool screenshare); |
| void VerifyNewVideoSendStreamsRespectNetworkState( |
| MediaType network_to_bring_up, |
| VideoEncoder* encoder, |
| Transport* transport); |
| void VerifyNewVideoReceiveStreamsRespectNetworkState( |
| MediaType network_to_bring_up, |
| Transport* transport); |
| }; |
| |
// Starting an already-started receive stream must be a safe no-op.
TEST_F(EndToEndTest, ReceiverCanBeStartedTwice) {
  CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

  test::NullTransport transport;
  CreateSendConfig(1, 0, 0, &transport);
  CreateMatchingReceiveConfigs(&transport);

  CreateVideoStreams();

  // The second Start() must not crash or assert.
  video_receive_streams_[0]->Start();
  video_receive_streams_[0]->Start();

  DestroyStreams();
}
| |
// Stopping a never-started (and then already-stopped) receive stream must be
// a safe no-op.
TEST_F(EndToEndTest, ReceiverCanBeStoppedTwice) {
  CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

  test::NullTransport transport;
  CreateSendConfig(1, 0, 0, &transport);
  CreateMatchingReceiveConfigs(&transport);

  CreateVideoStreams();

  // The second Stop() must not crash or assert.
  video_receive_streams_[0]->Stop();
  video_receive_streams_[0]->Stop();

  DestroyStreams();
}
| |
// A receive stream must tolerate an arbitrary Stop/Start/Stop sequence.
TEST_F(EndToEndTest, ReceiverCanBeStoppedAndRestarted) {
  CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

  test::NullTransport transport;
  CreateSendConfig(1, 0, 0, &transport);
  CreateMatchingReceiveConfigs(&transport);

  CreateVideoStreams();

  video_receive_streams_[0]->Stop();
  video_receive_streams_[0]->Start();
  video_receive_streams_[0]->Stop();

  DestroyStreams();
}
| |
// Sends a single captured frame end-to-end and verifies it is rendered even
// when the renderer itself is slow (sleeps before signaling).
TEST_F(EndToEndTest, RendersSingleDelayedFrame) {
  static const int kWidth = 320;
  static const int kHeight = 240;
  // This constant is chosen to be higher than the timeout in the video_render
  // module. This makes sure that frames aren't dropped if there are no other
  // frames in the queue.
  static const int kRenderDelayMs = 1000;

  // Renderer that blocks for kRenderDelayMs inside OnFrame() before
  // signaling that a frame was delivered.
  class Renderer : public rtc::VideoSinkInterface<VideoFrame> {
   public:
    Renderer() : event_(false, false) {}

    void OnFrame(const VideoFrame& video_frame) override {
      SleepMs(kRenderDelayMs);
      event_.Set();
    }

    bool Wait() { return event_.Wait(kDefaultTimeoutMs); }

    rtc::Event event_;
  } renderer;

  test::FrameForwarder frame_forwarder;
  std::unique_ptr<test::DirectTransport> sender_transport;
  std::unique_ptr<test::DirectTransport> receiver_transport;

  // All call/stream setup must happen on the test task queue.
  task_queue_.SendTask([this, &renderer, &frame_forwarder, &sender_transport,
                        &receiver_transport]() {
    CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

    sender_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, sender_call_.get(), payload_type_map_);
    receiver_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, receiver_call_.get(), payload_type_map_);
    // Wire the two transports back-to-back: sender -> receiver and
    // receiver -> sender (for RTCP feedback).
    sender_transport->SetReceiver(receiver_call_->Receiver());
    receiver_transport->SetReceiver(sender_call_->Receiver());

    CreateSendConfig(1, 0, 0, sender_transport.get());
    CreateMatchingReceiveConfigs(receiver_transport.get());

    video_receive_configs_[0].renderer = &renderer;

    CreateVideoStreams();
    Start();

    // Create frames that are smaller than the send width/height, this is done
    // to check that the callbacks are done after processing video.
    std::unique_ptr<test::FrameGenerator> frame_generator(
        test::FrameGenerator::CreateSquareGenerator(kWidth, kHeight));
    video_send_stream_->SetSource(
        &frame_forwarder,
        VideoSendStream::DegradationPreference::kMaintainFramerate);

    frame_forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
  });

  // Blocks off the task queue until the frame has traveled the full pipeline.
  EXPECT_TRUE(renderer.Wait())
      << "Timed out while waiting for the frame to render.";

  // Teardown must also run on the task queue, and the transports must be
  // destroyed before the calls they reference.
  task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
    Stop();
    DestroyStreams();
    sender_transport.reset();
    receiver_transport.reset();
    DestroyCalls();
  });
}
| |
// Verifies the very first captured frame makes it through encode, transport
// and decode, and is delivered to the renderer.
TEST_F(EndToEndTest, TransmitsFirstFrame) {
  // Renderer that signals as soon as any frame arrives.
  class Renderer : public rtc::VideoSinkInterface<VideoFrame> {
   public:
    Renderer() : event_(false, false) {}

    void OnFrame(const VideoFrame& video_frame) override { event_.Set(); }

    bool Wait() { return event_.Wait(kDefaultTimeoutMs); }

    rtc::Event event_;
  } renderer;

  std::unique_ptr<test::FrameGenerator> frame_generator;
  test::FrameForwarder frame_forwarder;

  std::unique_ptr<test::DirectTransport> sender_transport;
  std::unique_ptr<test::DirectTransport> receiver_transport;

  // All call/stream setup must happen on the test task queue.
  task_queue_.SendTask([this, &renderer, &frame_generator, &frame_forwarder,
                        &sender_transport, &receiver_transport]() {
    CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

    sender_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, sender_call_.get(), payload_type_map_);
    receiver_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, receiver_call_.get(), payload_type_map_);
    // Wire the two transports back-to-back in both directions.
    sender_transport->SetReceiver(receiver_call_->Receiver());
    receiver_transport->SetReceiver(sender_call_->Receiver());

    CreateSendConfig(1, 0, 0, sender_transport.get());
    CreateMatchingReceiveConfigs(receiver_transport.get());
    video_receive_configs_[0].renderer = &renderer;

    CreateVideoStreams();
    Start();

    frame_generator = test::FrameGenerator::CreateSquareGenerator(
        kDefaultWidth, kDefaultHeight);
    video_send_stream_->SetSource(
        &frame_forwarder,
        VideoSendStream::DegradationPreference::kMaintainFramerate);
    // Inject exactly one frame.
    frame_forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
  });

  EXPECT_TRUE(renderer.Wait())
      << "Timed out while waiting for the frame to render.";

  // Teardown on the task queue; transports die before their calls.
  task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
    Stop();
    DestroyStreams();
    sender_transport.reset();
    receiver_transport.reset();
    DestroyCalls();
  });
}
| |
// End-to-end observer that runs a call with a specific codec (encoder/decoder
// pair), waits for a fixed number of decoded frames, and checks that each
// rendered frame carries the expected rotation.
class CodecObserver : public test::EndToEndTest,
                      public rtc::VideoSinkInterface<VideoFrame> {
 public:
  // Takes ownership of |encoder| and |decoder| (stored in unique_ptrs below).
  CodecObserver(int no_frames_to_wait_for,
                VideoRotation rotation_to_test,
                const std::string& payload_name,
                webrtc::VideoEncoder* encoder,
                webrtc::VideoDecoder* decoder)
      : EndToEndTest(4 * webrtc::EndToEndTest::kDefaultTimeoutMs),
        // TODO(hta): This timeout (120 seconds) is excessive.
        // https://bugs.webrtc.org/6830
        no_frames_to_wait_for_(no_frames_to_wait_for),
        expected_rotation_(rotation_to_test),
        payload_name_(payload_name),
        encoder_(encoder),
        decoder_(decoder),
        frame_counter_(0) {}

  void PerformTest() override {
    EXPECT_TRUE(Wait())
        << "Timed out while waiting for enough frames to be decoded.";
  }

  // Installs the codec under test on both send and receive side, using a
  // single matching decoder, and registers this object as the renderer.
  void ModifyVideoConfigs(
      VideoSendStream::Config* send_config,
      std::vector<VideoReceiveStream::Config>* receive_configs,
      VideoEncoderConfig* encoder_config) override {
    send_config->encoder_settings.encoder = encoder_.get();
    send_config->encoder_settings.payload_name = payload_name_;
    send_config->encoder_settings.payload_type =
        test::CallTest::kVideoSendPayloadType;

    (*receive_configs)[0].renderer = this;
    (*receive_configs)[0].decoders.resize(1);
    (*receive_configs)[0].decoders[0].payload_type =
        send_config->encoder_settings.payload_type;
    (*receive_configs)[0].decoders[0].payload_name =
        send_config->encoder_settings.payload_name;
    (*receive_configs)[0].decoders[0].decoder = decoder_.get();
  }

  // Each decoded frame must have the rotation faked at capture time; the
  // test completes once enough frames have arrived.
  void OnFrame(const VideoFrame& video_frame) override {
    EXPECT_EQ(expected_rotation_, video_frame.rotation());
    if (++frame_counter_ == no_frames_to_wait_for_)
      observation_complete_.Set();
  }

  void OnFrameGeneratorCapturerCreated(
      test::FrameGeneratorCapturer* frame_generator_capturer) override {
    frame_generator_capturer->SetFakeRotation(expected_rotation_);
  }

 private:
  int no_frames_to_wait_for_;
  VideoRotation expected_rotation_;
  std::string payload_name_;
  std::unique_ptr<webrtc::VideoEncoder> encoder_;
  std::unique_ptr<webrtc::VideoDecoder> decoder_;
  int frame_counter_;
};
| |
// Decodes 5 VP8 frames, unrotated.
TEST_F(EndToEndTest, SendsAndReceivesVP8) {
  CodecObserver test(5, kVideoRotation_0, "VP8", VP8Encoder::Create(),
                     VP8Decoder::Create());
  RunBaseTest(&test);
}
| |
// Decodes 5 VP8 frames, each rotated 90 degrees at capture.
TEST_F(EndToEndTest, SendsAndReceivesVP8Rotation90) {
  CodecObserver test(5, kVideoRotation_90, "VP8", VP8Encoder::Create(),
                     VP8Decoder::Create());
  RunBaseTest(&test);
}
| |
| #if !defined(RTC_DISABLE_VP9) |
// Decodes 500 VP9 frames, unrotated (long run, covers inter frames).
TEST_F(EndToEndTest, SendsAndReceivesVP9) {
  CodecObserver test(500, kVideoRotation_0, "VP9", VP9Encoder::Create(),
                     VP9Decoder::Create());
  RunBaseTest(&test);
}
| |
// Decodes 5 VP9 frames, each rotated 90 degrees at capture.
TEST_F(EndToEndTest, SendsAndReceivesVP9VideoRotation90) {
  CodecObserver test(5, kVideoRotation_90, "VP9", VP9Encoder::Create(),
                     VP9Decoder::Create());
  RunBaseTest(&test);
}
| #endif // !defined(RTC_DISABLE_VP9) |
| |
| #if defined(WEBRTC_USE_H264) |
// Decodes 500 H264 frames, unrotated.
TEST_F(EndToEndTest, SendsAndReceivesH264) {
  CodecObserver test(500, kVideoRotation_0, "H264",
                     H264Encoder::Create(cricket::VideoCodec("H264")),
                     H264Decoder::Create());
  RunBaseTest(&test);
}
| |
// Decodes 5 H264 frames, each rotated 90 degrees at capture.
TEST_F(EndToEndTest, SendsAndReceivesH264VideoRotation90) {
  CodecObserver test(5, kVideoRotation_90, "H264",
                     H264Encoder::Create(cricket::VideoCodec("H264")),
                     H264Decoder::Create());
  RunBaseTest(&test);
}
| |
// Decodes 500 H264 frames using packetization mode 0 (single NAL unit mode).
TEST_F(EndToEndTest, SendsAndReceivesH264PacketizationMode0) {
  cricket::VideoCodec codec = cricket::VideoCodec("H264");
  codec.SetParam(cricket::kH264FmtpPacketizationMode, "0");
  CodecObserver test(500, kVideoRotation_0, "H264", H264Encoder::Create(codec),
                     H264Decoder::Create());
  RunBaseTest(&test);
}
| |
// Decodes 500 H264 frames using packetization mode 1 (non-interleaved).
TEST_F(EndToEndTest, SendsAndReceivesH264PacketizationMode1) {
  cricket::VideoCodec codec = cricket::VideoCodec("H264");
  codec.SetParam(cricket::kH264FmtpPacketizationMode, "1");
  CodecObserver test(500, kVideoRotation_0, "H264", H264Encoder::Create(codec),
                     H264Decoder::Create());
  RunBaseTest(&test);
}
| |
| #endif // defined(WEBRTC_USE_H264) |
| |
// Verifies that RTCP packets sent by the receiver carry the receiver's
// configured local SSRC as sender SSRC.
TEST_F(EndToEndTest, ReceiverUsesLocalSsrc) {
  class SyncRtcpObserver : public test::EndToEndTest {
   public:
    SyncRtcpObserver() : EndToEndTest(kDefaultTimeoutMs) {}

    // Inspect the first receiver-originated RTCP packet and finish.
    Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
      test::RtcpPacketParser parser;
      EXPECT_TRUE(parser.Parse(packet, length));
      EXPECT_EQ(kReceiverLocalVideoSsrc, parser.sender_ssrc());
      observation_complete_.Set();

      return SEND_PACKET;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait())
          << "Timed out while waiting for a receiver RTCP packet to be sent.";
    }
  } test;

  RunBaseTest(&test);
}
| |
| TEST_F(EndToEndTest, ReceivesAndRetransmitsNack) { |
| static const int kNumberOfNacksToObserve = 2; |
| static const int kLossBurstSize = 2; |
| static const int kPacketsBetweenLossBursts = 9; |
| class NackObserver : public test::EndToEndTest { |
| public: |
| NackObserver() |
| : EndToEndTest(kLongTimeoutMs), |
| sent_rtp_packets_(0), |
| packets_left_to_drop_(0), |
| nacks_left_(kNumberOfNacksToObserve) {} |
| |
| private: |
| Action OnSendRtp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock(&crit_); |
| RTPHeader header; |
| EXPECT_TRUE(parser_->Parse(packet, length, &header)); |
| |
| // Never drop retransmitted packets. |
| if (dropped_packets_.find(header.sequenceNumber) != |
| dropped_packets_.end()) { |
| retransmitted_packets_.insert(header.sequenceNumber); |
| return SEND_PACKET; |
| } |
| |
| if (nacks_left_ <= 0 && |
| retransmitted_packets_.size() == dropped_packets_.size()) { |
| observation_complete_.Set(); |
| } |
| |
| ++sent_rtp_packets_; |
| |
| // Enough NACKs received, stop dropping packets. |
| if (nacks_left_ <= 0) |
| return SEND_PACKET; |
| |
| // Check if it's time for a new loss burst. |
| if (sent_rtp_packets_ % kPacketsBetweenLossBursts == 0) |
| packets_left_to_drop_ = kLossBurstSize; |
| |
| // Never drop padding packets as those won't be retransmitted. |
| if (packets_left_to_drop_ > 0 && header.paddingLength == 0) { |
| --packets_left_to_drop_; |
| dropped_packets_.insert(header.sequenceNumber); |
| return DROP_PACKET; |
| } |
| |
| return SEND_PACKET; |
| } |
| |
| Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock(&crit_); |
| test::RtcpPacketParser parser; |
| EXPECT_TRUE(parser.Parse(packet, length)); |
| nacks_left_ -= parser.nack()->num_packets(); |
| return SEND_PACKET; |
| } |
| |
| void ModifyVideoConfigs( |
| VideoSendStream::Config* send_config, |
| std::vector<VideoReceiveStream::Config>* receive_configs, |
| VideoEncoderConfig* encoder_config) override { |
| send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs; |
| (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs; |
| } |
| |
| void PerformTest() override { |
| EXPECT_TRUE(Wait()) |
| << "Timed out waiting for packets to be NACKed, retransmitted and " |
| "rendered."; |
| } |
| |
| rtc::CriticalSection crit_; |
| std::set<uint16_t> dropped_packets_; |
| std::set<uint16_t> retransmitted_packets_; |
| uint64_t sent_rtp_packets_; |
| int packets_left_to_drop_; |
| int nacks_left_ GUARDED_BY(&crit_); |
| } test; |
| |
| RunBaseTest(&test); |
| } |
| |
// Audio-only variant: drops nothing, but repeatedly NACKs the first observed
// sequence number via hand-built RTCP until the sender retransmits it.
TEST_F(EndToEndTest, ReceivesNackAndRetransmitsAudio) {
  class NackObserver : public test::EndToEndTest {
   public:
    NackObserver()
        : EndToEndTest(kLongTimeoutMs),
          local_ssrc_(0),
          remote_ssrc_(0),
          receive_transport_(nullptr) {}

   private:
    size_t GetNumVideoStreams() const override { return 0; }
    size_t GetNumAudioStreams() const override { return 1; }

    // Keep a handle to the receive transport so we can inject RTCP NACKs.
    test::PacketTransport* CreateReceiveTransport(
        test::SingleThreadedTaskQueueForTesting* task_queue) override {
      test::PacketTransport* receive_transport = new test::PacketTransport(
          task_queue, nullptr, this, test::PacketTransport::kReceiver,
          payload_type_map_, FakeNetworkPipe::Config());
      receive_transport_ = receive_transport;
      return receive_transport;
    }

    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      if (!sequence_number_to_retransmit_) {
        // Remember the first sequence number seen; NACK it from now on.
        sequence_number_to_retransmit_ =
            rtc::Optional<uint16_t>(header.sequenceNumber);

        // Don't ask for retransmission straight away, may be deduped in pacer.
      } else if (header.sequenceNumber == *sequence_number_to_retransmit_) {
        // The sender retransmitted the NACKed packet -> success.
        observation_complete_.Set();
      } else {
        // Send a NACK as often as necessary until retransmission is received.
        rtcp::Nack nack;
        nack.SetSenderSsrc(local_ssrc_);
        nack.SetMediaSsrc(remote_ssrc_);
        uint16_t nack_list[] = {*sequence_number_to_retransmit_};
        nack.SetPacketIds(nack_list, 1);
        rtc::Buffer buffer = nack.Build();

        EXPECT_TRUE(receive_transport_->SendRtcp(buffer.data(), buffer.size()));
      }

      return SEND_PACKET;
    }

    // Enable NACK history and record the SSRCs needed to build RTCP NACKs.
    void ModifyAudioConfigs(
        AudioSendStream::Config* send_config,
        std::vector<AudioReceiveStream::Config>* receive_configs) override {
      send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      local_ssrc_ = (*receive_configs)[0].rtp.local_ssrc;
      remote_ssrc_ = (*receive_configs)[0].rtp.remote_ssrc;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait())
          << "Timed out waiting for packets to be NACKed, retransmitted and "
             "rendered.";
    }

    // NOTE(review): these members are written during setup and read from the
    // transport callback without a lock — presumably setup happens-before any
    // packets flow; confirm against CallTest's sequencing.
    uint32_t local_ssrc_;
    uint32_t remote_ssrc_;
    Transport* receive_transport_;
    rtc::Optional<uint16_t> sequence_number_to_retransmit_;
  } test;

  RunBaseTest(&test);
}
| |
// Enables ULPFEC over RED, drops ~5% of media packets (never FEC packets),
// and verifies that a frame whose timestamp was dropped still renders —
// i.e. FEC recovery worked.
TEST_F(EndToEndTest, ReceivesUlpfec) {
  class UlpfecRenderObserver : public test::EndToEndTest,
                               public rtc::VideoSinkInterface<VideoFrame> {
   public:
    UlpfecRenderObserver()
        : EndToEndTest(kDefaultTimeoutMs),
          encoder_(VP8Encoder::Create()),
          random_(0xcafef00d1),
          num_packets_sent_(0) {}

   private:
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      EXPECT_TRUE(header.payloadType == kVideoSendPayloadType ||
                  header.payloadType == kRedPayloadType)
          << "Unknown payload type received.";
      EXPECT_EQ(kVideoSendSsrcs[0], header.ssrc) << "Unknown SSRC received.";

      // Parse RED header: the first payload byte is the encapsulated
      // payload type.
      int encapsulated_payload_type = -1;
      if (header.payloadType == kRedPayloadType) {
        encapsulated_payload_type =
            static_cast<int>(packet[header.headerLength]);

        EXPECT_TRUE(encapsulated_payload_type == kVideoSendPayloadType ||
                    encapsulated_payload_type == kUlpfecPayloadType)
            << "Unknown encapsulated payload type received.";
      }

      // To minimize test flakiness, always let ULPFEC packets through.
      if (encapsulated_payload_type == kUlpfecPayloadType) {
        return SEND_PACKET;
      }

      // Simulate 5% video packet loss after rampup period. Record the
      // corresponding timestamps that were dropped.
      if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) {
        if (encapsulated_payload_type == kVideoSendPayloadType) {
          dropped_sequence_numbers_.insert(header.sequenceNumber);
          dropped_timestamps_.insert(header.timestamp);
        }
        return DROP_PACKET;
      }

      return SEND_PACKET;
    }

    void OnFrame(const VideoFrame& video_frame) override {
      rtc::CritScope lock(&crit_);
      // Rendering frame with timestamp of packet that was dropped -> FEC
      // protection worked.
      auto it = dropped_timestamps_.find(video_frame.timestamp());
      if (it != dropped_timestamps_.end()) {
        observation_complete_.Set();
      }
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      // Use VP8 instead of FAKE, since the latter does not have PictureID
      // in the packetization headers.
      send_config->encoder_settings.encoder = encoder_.get();
      send_config->encoder_settings.payload_name = "VP8";
      send_config->encoder_settings.payload_type = kVideoSendPayloadType;
      VideoReceiveStream::Decoder decoder =
          test::CreateMatchingDecoder(send_config->encoder_settings);
      // Take ownership of the decoder so it outlives the receive stream.
      decoder_.reset(decoder.decoder);
      (*receive_configs)[0].decoders.clear();
      (*receive_configs)[0].decoders.push_back(decoder);

      // Enable ULPFEC over RED.
      send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
      send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
      (*receive_configs)[0].rtp.ulpfec.red_payload_type = kRedPayloadType;
      (*receive_configs)[0].rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;

      (*receive_configs)[0].renderer = this;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait())
          << "Timed out waiting for dropped frames to be rendered.";
    }

    rtc::CriticalSection crit_;
    std::unique_ptr<VideoEncoder> encoder_;
    std::unique_ptr<VideoDecoder> decoder_;
    // NOTE(review): populated but never read in this test — possibly kept
    // for debugging or symmetry with the FlexFEC observer.
    std::set<uint32_t> dropped_sequence_numbers_ GUARDED_BY(crit_);
    // Several packets can have the same timestamp.
    std::multiset<uint32_t> dropped_timestamps_ GUARDED_BY(crit_);
    Random random_;
    int num_packets_sent_ GUARDED_BY(crit_);
  } test;

  RunBaseTest(&test);
}
| |
| class FlexfecRenderObserver : public test::EndToEndTest, |
| public rtc::VideoSinkInterface<VideoFrame> { |
| public: |
| static constexpr uint32_t kVideoLocalSsrc = 123; |
| static constexpr uint32_t kFlexfecLocalSsrc = 456; |
| |
| explicit FlexfecRenderObserver(bool enable_nack, bool expect_flexfec_rtcp) |
| : test::EndToEndTest(test::CallTest::kDefaultTimeoutMs), |
| enable_nack_(enable_nack), |
| expect_flexfec_rtcp_(expect_flexfec_rtcp), |
| received_flexfec_rtcp_(false), |
| random_(0xcafef00d1), |
| num_packets_sent_(0) {} |
| |
| size_t GetNumFlexfecStreams() const override { return 1; } |
| |
| private: |
| Action OnSendRtp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock(&crit_); |
| RTPHeader header; |
| EXPECT_TRUE(parser_->Parse(packet, length, &header)); |
| |
| EXPECT_TRUE(header.payloadType == |
| test::CallTest::kFakeVideoSendPayloadType || |
| header.payloadType == test::CallTest::kFlexfecPayloadType || |
| (enable_nack_ && |
| header.payloadType == test::CallTest::kSendRtxPayloadType)) |
| << "Unknown payload type received."; |
| EXPECT_TRUE( |
| header.ssrc == test::CallTest::kVideoSendSsrcs[0] || |
| header.ssrc == test::CallTest::kFlexfecSendSsrc || |
| (enable_nack_ && header.ssrc == test::CallTest::kSendRtxSsrcs[0])) |
| << "Unknown SSRC received."; |
| |
| // To reduce test flakiness, always let FlexFEC packets through. |
| if (header.payloadType == test::CallTest::kFlexfecPayloadType) { |
| EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, header.ssrc); |
| |
| return SEND_PACKET; |
| } |
| |
| // To reduce test flakiness, always let RTX packets through. |
| if (header.payloadType == test::CallTest::kSendRtxPayloadType) { |
| EXPECT_EQ(test::CallTest::kSendRtxSsrcs[0], header.ssrc); |
| |
| // Parse RTX header. |
| uint16_t original_sequence_number = |
| ByteReader<uint16_t>::ReadBigEndian(&packet[header.headerLength]); |
| |
| // From the perspective of FEC, a retransmitted packet is no longer |
| // dropped, so remove it from list of dropped packets. |
| auto seq_num_it = |
| dropped_sequence_numbers_.find(original_sequence_number); |
| if (seq_num_it != dropped_sequence_numbers_.end()) { |
| dropped_sequence_numbers_.erase(seq_num_it); |
| auto ts_it = dropped_timestamps_.find(header.timestamp); |
| EXPECT_NE(ts_it, dropped_timestamps_.end()); |
| dropped_timestamps_.erase(ts_it); |
| } |
| |
| return SEND_PACKET; |
| } |
| |
| // Simulate 5% video packet loss after rampup period. Record the |
| // corresponding timestamps that were dropped. |
| if (num_packets_sent_++ > 100 && random_.Rand(1, 100) <= 5) { |
| EXPECT_EQ(test::CallTest::kFakeVideoSendPayloadType, header.payloadType); |
| EXPECT_EQ(test::CallTest::kVideoSendSsrcs[0], header.ssrc); |
| |
| dropped_sequence_numbers_.insert(header.sequenceNumber); |
| dropped_timestamps_.insert(header.timestamp); |
| |
| return DROP_PACKET; |
| } |
| |
| return SEND_PACKET; |
| } |
| |
| Action OnReceiveRtcp(const uint8_t* data, size_t length) override { |
| test::RtcpPacketParser parser; |
| |
| parser.Parse(data, length); |
| if (parser.sender_ssrc() == kFlexfecLocalSsrc) { |
| EXPECT_EQ(1, parser.receiver_report()->num_packets()); |
| const std::vector<rtcp::ReportBlock>& report_blocks = |
| parser.receiver_report()->report_blocks(); |
| if (!report_blocks.empty()) { |
| EXPECT_EQ(1U, report_blocks.size()); |
| EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, |
| report_blocks[0].source_ssrc()); |
| rtc::CritScope lock(&crit_); |
| received_flexfec_rtcp_ = true; |
| } |
| } |
| |
| return SEND_PACKET; |
| } |
| |
| test::PacketTransport* CreateSendTransport( |
| test::SingleThreadedTaskQueueForTesting* task_queue, |
| Call* sender_call) override { |
| // At low RTT (< kLowRttNackMs) -> NACK only, no FEC. |
| const int kNetworkDelayMs = 100; |
| FakeNetworkPipe::Config config; |
| config.queue_delay_ms = kNetworkDelayMs; |
| return new test::PacketTransport(task_queue, sender_call, this, |
| test::PacketTransport::kSender, |
| test::CallTest::payload_type_map_, config); |
| } |
| |
| void OnFrame(const VideoFrame& video_frame) override { |
| EXPECT_EQ(kVideoRotation_90, video_frame.rotation()); |
| |
| rtc::CritScope lock(&crit_); |
| // Rendering frame with timestamp of packet that was dropped -> FEC |
| // protection worked. |
| auto it = dropped_timestamps_.find(video_frame.timestamp()); |
| if (it != dropped_timestamps_.end()) { |
| if (!expect_flexfec_rtcp_ || received_flexfec_rtcp_) { |
| observation_complete_.Set(); |
| } |
| } |
| } |
| |
| void ModifyVideoConfigs( |
| VideoSendStream::Config* send_config, |
| std::vector<VideoReceiveStream::Config>* receive_configs, |
| VideoEncoderConfig* encoder_config) override { |
| (*receive_configs)[0].rtp.local_ssrc = kVideoLocalSsrc; |
| (*receive_configs)[0].renderer = this; |
| |
| if (enable_nack_) { |
| send_config->rtp.nack.rtp_history_ms = test::CallTest::kNackRtpHistoryMs; |
| send_config->rtp.ulpfec.red_rtx_payload_type = |
| test::CallTest::kRtxRedPayloadType; |
| send_config->rtp.rtx.ssrcs.push_back(test::CallTest::kSendRtxSsrcs[0]); |
| send_config->rtp.rtx.payload_type = test::CallTest::kSendRtxPayloadType; |
| |
| (*receive_configs)[0].rtp.nack.rtp_history_ms = |
| test::CallTest::kNackRtpHistoryMs; |
| (*receive_configs)[0].rtp.ulpfec.red_rtx_payload_type = |
| test::CallTest::kRtxRedPayloadType; |
| |
| (*receive_configs)[0].rtp.rtx_ssrc = test::CallTest::kSendRtxSsrcs[0]; |
| (*receive_configs)[0] |
| .rtp |
| .rtx_associated_payload_types[test::CallTest::kSendRtxPayloadType] = |
| test::CallTest::kVideoSendPayloadType; |
| } |
| } |
| |
| void OnFrameGeneratorCapturerCreated( |
| test::FrameGeneratorCapturer* frame_generator_capturer) override { |
| frame_generator_capturer->SetFakeRotation(kVideoRotation_90); |
| } |
| |
| void ModifyFlexfecConfigs( |
| std::vector<FlexfecReceiveStream::Config>* receive_configs) override { |
| (*receive_configs)[0].local_ssrc = kFlexfecLocalSsrc; |
| } |
| |
| void PerformTest() override { |
| EXPECT_TRUE(Wait()) |
| << "Timed out waiting for dropped frames to be rendered."; |
| } |
| |
| rtc::CriticalSection crit_; |
| std::set<uint32_t> dropped_sequence_numbers_ GUARDED_BY(crit_); |
| // Several packets can have the same timestamp. |
| std::multiset<uint32_t> dropped_timestamps_ GUARDED_BY(crit_); |
| const bool enable_nack_; |
| const bool expect_flexfec_rtcp_; |
| bool received_flexfec_rtcp_ GUARDED_BY(crit_); |
| Random random_; |
| int num_packets_sent_; |
| }; |
| |
TEST_F(EndToEndTest, RecoversWithFlexfec) {
  // (enable_nack, expect_flexfec_rtcp)
  FlexfecRenderObserver test(false, false);
  RunBaseTest(&test);
}
| |
TEST_F(EndToEndTest, RecoversWithFlexfecAndNack) {
  // (enable_nack, expect_flexfec_rtcp)
  FlexfecRenderObserver test(true, false);
  RunBaseTest(&test);
}
| |
TEST_F(EndToEndTest, RecoversWithFlexfecAndSendsCorrespondingRtcp) {
  // (enable_nack, expect_flexfec_rtcp)
  FlexfecRenderObserver test(false, true);
  RunBaseTest(&test);
}
| |
| TEST_F(EndToEndTest, ReceivedUlpfecPacketsNotNacked) { |
| class UlpfecNackObserver : public test::EndToEndTest { |
| public: |
| UlpfecNackObserver() |
| : EndToEndTest(kDefaultTimeoutMs), |
| state_(kFirstPacket), |
| ulpfec_sequence_number_(0), |
| has_last_sequence_number_(false), |
| last_sequence_number_(0), |
| encoder_(VP8Encoder::Create()), |
| decoder_(VP8Decoder::Create()) {} |
| |
| private: |
| Action OnSendRtp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock_(&crit_); |
| RTPHeader header; |
| EXPECT_TRUE(parser_->Parse(packet, length, &header)); |
| |
| int encapsulated_payload_type = -1; |
| if (header.payloadType == kRedPayloadType) { |
| encapsulated_payload_type = |
| static_cast<int>(packet[header.headerLength]); |
| if (encapsulated_payload_type != kFakeVideoSendPayloadType) |
| EXPECT_EQ(kUlpfecPayloadType, encapsulated_payload_type); |
| } else { |
| EXPECT_EQ(kFakeVideoSendPayloadType, header.payloadType); |
| } |
| |
| if (has_last_sequence_number_ && |
| !IsNewerSequenceNumber(header.sequenceNumber, |
| last_sequence_number_)) { |
| // Drop retransmitted packets. |
| return DROP_PACKET; |
| } |
| last_sequence_number_ = header.sequenceNumber; |
| has_last_sequence_number_ = true; |
| |
| bool ulpfec_packet = encapsulated_payload_type == kUlpfecPayloadType; |
| switch (state_) { |
| case kFirstPacket: |
| state_ = kDropEveryOtherPacketUntilUlpfec; |
| break; |
| case kDropEveryOtherPacketUntilUlpfec: |
| if (ulpfec_packet) { |
| state_ = kDropAllMediaPacketsUntilUlpfec; |
| } else if (header.sequenceNumber % 2 == 0) { |
| return DROP_PACKET; |
| } |
| break; |
| case kDropAllMediaPacketsUntilUlpfec: |
| if (!ulpfec_packet) |
| return DROP_PACKET; |
| ulpfec_sequence_number_ = header.sequenceNumber; |
| state_ = kDropOneMediaPacket; |
| break; |
| case kDropOneMediaPacket: |
| if (ulpfec_packet) |
| return DROP_PACKET; |
| state_ = kPassOneMediaPacket; |
| return DROP_PACKET; |
| break; |
| case kPassOneMediaPacket: |
| if (ulpfec_packet) |
| return DROP_PACKET; |
| // Pass one media packet after dropped packet after last FEC, |
| // otherwise receiver might never see a seq_no after |
| // |ulpfec_sequence_number_| |
| state_ = kVerifyUlpfecPacketNotInNackList; |
| break; |
| case kVerifyUlpfecPacketNotInNackList: |
| // Continue to drop packets. Make sure no frame can be decoded. |
| if (ulpfec_packet || header.sequenceNumber % 2 == 0) |
| return DROP_PACKET; |
| break; |
| } |
| return SEND_PACKET; |
| } |
| |
| Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock_(&crit_); |
| if (state_ == kVerifyUlpfecPacketNotInNackList) { |
| test::RtcpPacketParser rtcp_parser; |
| rtcp_parser.Parse(packet, length); |
| const std::vector<uint16_t>& nacks = rtcp_parser.nack()->packet_ids(); |
| EXPECT_TRUE(std::find(nacks.begin(), nacks.end(), |
| ulpfec_sequence_number_) == nacks.end()) |
| << "Got nack for ULPFEC packet"; |
| if (!nacks.empty() && |
| IsNewerSequenceNumber(nacks.back(), ulpfec_sequence_number_)) { |
| observation_complete_.Set(); |
| } |
| } |
| return SEND_PACKET; |
| } |
| |
| test::PacketTransport* CreateSendTransport( |
| test::SingleThreadedTaskQueueForTesting* task_queue, |
| Call* sender_call) override { |
| // At low RTT (< kLowRttNackMs) -> NACK only, no FEC. |
| // Configure some network delay. |
| const int kNetworkDelayMs = 50; |
| FakeNetworkPipe::Config config; |
| config.queue_delay_ms = kNetworkDelayMs; |
| return new test::PacketTransport(task_queue, sender_call, this, |
| test::PacketTransport::kSender, |
| payload_type_map_, config); |
| } |
| |
| // TODO(holmer): Investigate why we don't send FEC packets when the bitrate |
| // is 10 kbps. |
| Call::Config GetSenderCallConfig() override { |
| Call::Config config(event_log_.get()); |
| const int kMinBitrateBps = 30000; |
| config.bitrate_config.min_bitrate_bps = kMinBitrateBps; |
| return config; |
| } |
| |
| void ModifyVideoConfigs( |
| VideoSendStream::Config* send_config, |
| std::vector<VideoReceiveStream::Config>* receive_configs, |
| VideoEncoderConfig* encoder_config) override { |
| // Configure hybrid NACK/FEC. |
| send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs; |
| send_config->rtp.ulpfec.red_payload_type = kRedPayloadType; |
| send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType; |
| // Set codec to VP8, otherwise NACK/FEC hybrid will be disabled. |
| send_config->encoder_settings.encoder = encoder_.get(); |
| send_config->encoder_settings.payload_name = "VP8"; |
| send_config->encoder_settings.payload_type = kFakeVideoSendPayloadType; |
| |
| (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs; |
| (*receive_configs)[0].rtp.ulpfec.red_payload_type = kRedPayloadType; |
| (*receive_configs)[0].rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType; |
| |
| (*receive_configs)[0].decoders.resize(1); |
| (*receive_configs)[0].decoders[0].payload_type = |
| send_config->encoder_settings.payload_type; |
| (*receive_configs)[0].decoders[0].payload_name = |
| send_config->encoder_settings.payload_name; |
| (*receive_configs)[0].decoders[0].decoder = decoder_.get(); |
| } |
| |
| void PerformTest() override { |
| EXPECT_TRUE(Wait()) |
| << "Timed out while waiting for FEC packets to be received."; |
| } |
| |
| enum { |
| kFirstPacket, |
| kDropEveryOtherPacketUntilUlpfec, |
| kDropAllMediaPacketsUntilUlpfec, |
| kDropOneMediaPacket, |
| kPassOneMediaPacket, |
| kVerifyUlpfecPacketNotInNackList, |
| } state_; |
| |
| rtc::CriticalSection crit_; |
| uint16_t ulpfec_sequence_number_ GUARDED_BY(&crit_); |
| bool has_last_sequence_number_; |
| uint16_t last_sequence_number_; |
| std::unique_ptr<webrtc::VideoEncoder> encoder_; |
| std::unique_ptr<webrtc::VideoDecoder> decoder_; |
| } test; |
| |
| RunBaseTest(&test); |
| } |
| |
// This test drops the final RTP packet (marker bit set) of the
// |kDroppedFrameNumber|-th frame, makes sure it's retransmitted and renders.
// Retransmission SSRCs are also checked.
| void EndToEndTest::DecodesRetransmittedFrame(bool enable_rtx, bool enable_red) { |
| static const int kDroppedFrameNumber = 10; |
| class RetransmissionObserver : public test::EndToEndTest, |
| public rtc::VideoSinkInterface<VideoFrame> { |
| public: |
| RetransmissionObserver(bool enable_rtx, bool enable_red) |
| : EndToEndTest(kDefaultTimeoutMs), |
| payload_type_(GetPayloadType(false, enable_red)), |
| retransmission_ssrc_(enable_rtx ? kSendRtxSsrcs[0] |
| : kVideoSendSsrcs[0]), |
| retransmission_payload_type_(GetPayloadType(enable_rtx, enable_red)), |
| encoder_(VP8Encoder::Create()), |
| marker_bits_observed_(0), |
| retransmitted_timestamp_(0) {} |
| |
| private: |
| Action OnSendRtp(const uint8_t* packet, size_t length) override { |
| rtc::CritScope lock(&crit_); |
| RTPHeader header; |
| EXPECT_TRUE(parser_->Parse(packet, length, &header)); |
| |
| // Ignore padding-only packets over RTX. |
| if (header.payloadType != payload_type_) { |
| EXPECT_EQ(retransmission_ssrc_, header.ssrc); |
| if (length == header.headerLength + header.paddingLength) |
| return SEND_PACKET; |
| } |
| |
| if (header.timestamp == retransmitted_timestamp_) { |
| EXPECT_EQ(retransmission_ssrc_, header.ssrc); |
| EXPECT_EQ(retransmission_payload_type_, header.payloadType); |
| return SEND_PACKET; |
| } |
| |
| // Found the final packet of the frame to inflict loss to, drop this and |
| // expect a retransmission. |
| if (header.payloadType == payload_type_ && header.markerBit && |
| ++marker_bits_observed_ == kDroppedFrameNumber) { |
| // This should be the only dropped packet. |
| EXPECT_EQ(0u, retransmitted_timestamp_); |
| retransmitted_timestamp_ = header.timestamp; |
| if (std::find(rendered_timestamps_.begin(), rendered_timestamps_.end(), |
| retransmitted_timestamp_) != rendered_timestamps_.end()) { |
| // Frame was rendered before last packet was scheduled for sending. |
| // This is extremly rare but possible scenario because prober able to |
| // resend packet before it was send. |
| // TODO(danilchap): Remove this corner case when prober would not be |
| // able to sneak in between packet saved to history for resending and |
| // pacer notified about existance of that packet for sending. |
| // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5540 for |
| // details. |
| observation_complete_.Set(); |
| } |
| return DROP_PACKET; |
| } |
| |
| return SEND_PACKET; |
| } |
| |
| void OnFrame(const VideoFrame& frame) override { |
| EXPECT_EQ(kVideoRotation_90, frame.rotation()); |
| { |
| rtc::CritScope lock(&crit_); |
| if (frame.timestamp() == retransmitted_timestamp_) |
| observation_complete_.Set(); |
| rendered_timestamps_.push_back(frame.timestamp()); |
| } |
| orig_renderer_->OnFrame(frame); |
| } |
| |
| void ModifyVideoConfigs( |
| VideoSendStream::Config* send_config, |
| std::vector<VideoReceiveStream::Config>* receive_configs, |
| VideoEncoderConfig* encoder_config) override { |
| send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs; |
| |
| // Insert ourselves into the rendering pipeline. |
| RTC_DCHECK(!orig_renderer_); |
| orig_renderer_ = (*receive_configs)[0].renderer; |
| RTC_DCHECK(orig_renderer_); |
| (*receive_configs)[0].disable_prerenderer_smoothing = true; |
| (*receive_configs)[0].renderer = this; |
| |
| (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs; |
| |
| if (payload_type_ == kRedPayloadType) { |
| send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType; |
| send_config->rtp.ulpfec.red_payload_type = kRedPayloadType; |
| if (retransmission_ssrc_ == kSendRtxSsrcs[0]) |
| send_config->rtp.ulpfec.red_rtx_payload_type = kRtxRedPayloadType; |
| (*receive_configs)[0].rtp.ulpfec.ulpfec_payload_type = |
| send_config->rtp.ulpfec.ulpfec_payload_type; |
| (*receive_configs)[0].rtp.ulpfec.red_payload_type = |
| send_config->rtp.ulpfec.red_payload_type; |
| (*receive_configs)[0].rtp.ulpfec.red_rtx_payload_type = |
| send_config->rtp.ulpfec.red_rtx_payload_type; |
| } |
| |
| if (retransmission_ssrc_ == kSendRtxSsrcs[0]) { |
| send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]); |
| send_config->rtp.rtx.payload_type = kSendRtxPayloadType; |
| (*receive_configs)[0].rtp.rtx_ssrc = kSendRtxSsrcs[0]; |
| (*receive_configs)[0] |
| .rtp.rtx_associated_payload_types[kSendRtxPayloadType] = |
| payload_type_; |
| } |
| // Configure encoding and decoding with VP8, since generic packetization |
| // doesn't support FEC with NACK. |
| RTC_DCHECK_EQ(1, (*receive_configs)[0].decoders.size()); |
| send_config->encoder_settings.encoder = encoder_.get(); |
| send_config->encoder_settings.payload_name = "VP8"; |
| (*receive_configs)[0].decoders[0].payload_name = "VP8"; |
| } |
| |
| void OnFrameGeneratorCapturerCreated( |
| test::FrameGeneratorCapturer* frame_generator_capturer) override { |
| frame_generator_capturer->SetFakeRotation(kVideoRotation_90); |
| } |
| |
| void PerformTest() override { |
| EXPECT_TRUE(Wait()) |
| << "Timed out while waiting for retransmission to render."; |
| } |
| |
| int GetPayloadType(bool use_rtx, bool use_red) { |
| if (use_red) { |
| if (use_rtx) |
| return kRtxRedPayloadType; |
| return kRedPayloadType; |
| } |
| if (use_rtx) |
| return kSendRtxPayloadType; |
| return kFakeVideoSendPayloadType; |
| } |
| |
| rtc::CriticalSection crit_; |
| rtc::VideoSinkInterface<VideoFrame>* orig_renderer_ = nullptr; |
| const int payload_type_; |
| const uint32_t retransmission_ssrc_; |
| const int retransmission_payload_type_; |
| std::unique_ptr<VideoEncoder> encoder_; |
| const std::string payload_name_; |
| int marker_bits_observed_; |
| uint32_t retransmitted_timestamp_ GUARDED_BY(&crit_); |
| std::vector<uint32_t> rendered_timestamps_ GUARDED_BY(&crit_); |
| } test(enable_rtx, enable_red); |
| |
| RunBaseTest(&test); |
| } |
| |
// Plain retransmission on the media SSRC: no RTX, no RED.
TEST_F(EndToEndTest, DecodesRetransmittedFrame) {
  DecodesRetransmittedFrame(false, false);
}

// Retransmission over a separate RTX SSRC/payload type.
TEST_F(EndToEndTest, DecodesRetransmittedFrameOverRtx) {
  DecodesRetransmittedFrame(true, false);
}

// Retransmission with RED encapsulation on the media SSRC.
TEST_F(EndToEndTest, DecodesRetransmittedFrameByRed) {
  DecodesRetransmittedFrame(false, true);
}

// Retransmission with RED encapsulation over RTX.
TEST_F(EndToEndTest, DecodesRetransmittedFrameByRedOverRtx) {
  DecodesRetransmittedFrame(true, true);
}
| |
// Drops frames (and all of their retransmissions) to force the receiver to
// emit a PLI, then verifies that a frame newer than the dropped ones is
// rendered after the PLI is observed. With |rtp_history_ms| == 0, NACK is
// disabled and the test additionally asserts that no NACKs are sent.
void EndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) {
  static const int kPacketsToDrop = 1;

  class PliObserver : public test::EndToEndTest,
                      public rtc::VideoSinkInterface<VideoFrame> {
   public:
    explicit PliObserver(int rtp_history_ms)
        : EndToEndTest(kLongTimeoutMs),
          rtp_history_ms_(rtp_history_ms),
          nack_enabled_(rtp_history_ms > 0),
          highest_dropped_timestamp_(0),
          frames_to_drop_(0),
          received_pli_(false) {}

   private:
    // Drops |frames_to_drop_| fresh frames and every packet whose timestamp
    // is not newer than the most recently dropped one.
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));

      // Drop all retransmitted packets to force a PLI.
      if (header.timestamp <= highest_dropped_timestamp_)
        return DROP_PACKET;

      if (frames_to_drop_ > 0) {
        highest_dropped_timestamp_ = header.timestamp;
        --frames_to_drop_;
        return DROP_PACKET;
      }

      return SEND_PACKET;
    }

    // Records whether a PLI has been seen; with NACK disabled, also asserts
    // that no NACK feedback is ever sent.
    Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      test::RtcpPacketParser parser;
      EXPECT_TRUE(parser.Parse(packet, length));
      if (!nack_enabled_)
        EXPECT_EQ(0, parser.nack()->num_packets());
      if (parser.pli()->num_packets() > 0)
        received_pli_ = true;
      return SEND_PACKET;
    }

    // A rendered frame newer than everything we dropped, observed after a
    // PLI, proves recovery. Until a PLI arrives, keep re-arming the drop
    // counter so more frames get dropped.
    void OnFrame(const VideoFrame& video_frame) override {
      rtc::CritScope lock(&crit_);
      if (received_pli_ &&
          video_frame.timestamp() > highest_dropped_timestamp_) {
        observation_complete_.Set();
      }
      if (!received_pli_)
        frames_to_drop_ = kPacketsToDrop;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->rtp.nack.rtp_history_ms = rtp_history_ms_;
      (*receive_configs)[0].rtp.nack.rtp_history_ms = rtp_history_ms_;
      (*receive_configs)[0].renderer = this;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out waiting for PLI to be "
                             "received and a frame to be "
                             "rendered afterwards.";
    }

    rtc::CriticalSection crit_;
    int rtp_history_ms_;
    bool nack_enabled_;
    uint32_t highest_dropped_timestamp_ GUARDED_BY(&crit_);
    int frames_to_drop_ GUARDED_BY(&crit_);
    bool received_pli_ GUARDED_BY(&crit_);
  } test(rtp_history_ms);

  RunBaseTest(&test);
}
| |
// NACK enabled (1 second of RTP history).
TEST_F(EndToEndTest, ReceivesPliAndRecoversWithNack) {
  ReceivesPliAndRecovers(1000);
}

// NACK disabled (no RTP history); recovery must rely on PLI alone.
TEST_F(EndToEndTest, ReceivesPliAndRecoversWithoutNack) {
  ReceivesPliAndRecovers(0);
}
| |
// Destroys the only video receive stream mid-call and verifies that the next
// delivered RTP packet is rejected with DELIVERY_UNKNOWN_SSRC.
TEST_F(EndToEndTest, UnknownRtpPacketGivesUnknownSsrcReturnCode) {
  // Wraps the receiver call's PacketReceiver: passes RTCP through untouched,
  // and for RTP asserts the unknown-SSRC delivery status before signaling.
  class PacketInputObserver : public PacketReceiver {
   public:
    explicit PacketInputObserver(PacketReceiver* receiver)
        : receiver_(receiver), delivered_packet_(false, false) {}

    bool Wait() { return delivered_packet_.Wait(kDefaultTimeoutMs); }

   private:
    DeliveryStatus DeliverPacket(MediaType media_type,
                                 const uint8_t* packet,
                                 size_t length,
                                 const PacketTime& packet_time) override {
      if (RtpHeaderParser::IsRtcp(packet, length)) {
        return receiver_->DeliverPacket(media_type, packet, length,
                                        packet_time);
      } else {
        // The receive stream has been destroyed by the test body, so RTP
        // delivery is expected to fail with an unknown SSRC.
        DeliveryStatus delivery_status =
            receiver_->DeliverPacket(media_type, packet, length, packet_time);
        EXPECT_EQ(DELIVERY_UNKNOWN_SSRC, delivery_status);
        delivered_packet_.Set();
        return delivery_status;
      }
    }

    PacketReceiver* receiver_;
    rtc::Event delivered_packet_;
  };

  std::unique_ptr<test::DirectTransport> send_transport;
  std::unique_ptr<test::DirectTransport> receive_transport;
  std::unique_ptr<PacketInputObserver> input_observer;

  // Setup runs on the test task queue; note the receive stream is destroyed
  // immediately after Start() so subsequent RTP has no matching SSRC.
  task_queue_.SendTask([this, &send_transport, &receive_transport,
                        &input_observer]() {
    CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

    send_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, sender_call_.get(), payload_type_map_);
    receive_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, receiver_call_.get(), payload_type_map_);
    input_observer =
        rtc::MakeUnique<PacketInputObserver>(receiver_call_->Receiver());
    send_transport->SetReceiver(input_observer.get());
    receive_transport->SetReceiver(sender_call_->Receiver());

    CreateSendConfig(1, 0, 0, send_transport.get());
    CreateMatchingReceiveConfigs(receive_transport.get());

    CreateVideoStreams();
    CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth,
                                 kDefaultHeight);
    Start();

    receiver_call_->DestroyVideoReceiveStream(video_receive_streams_[0]);
    video_receive_streams_.clear();
  });

  // Wait() waits for a received packet.
  EXPECT_TRUE(input_observer->Wait());

  // Teardown must also happen on the task queue, in reverse order of setup.
  task_queue_.SendTask([this, &send_transport, &receive_transport]() {
    Stop();
    DestroyStreams();
    send_transport.reset();
    receive_transport.reset();
    DestroyCalls();
  });
}
| |
// Drops every third RTP packet (to provoke RTCP feedback) and verifies the
// receiver's RTCP conforms to the negotiated mode: compound packets must
// always carry a receiver report; reduced-size mode must eventually send a
// packet without one.
void EndToEndTest::RespectsRtcpMode(RtcpMode rtcp_mode) {
  static const int kNumCompoundRtcpPacketsToObserve = 10;
  class RtcpModeObserver : public test::EndToEndTest {
   public:
    explicit RtcpModeObserver(RtcpMode rtcp_mode)
        : EndToEndTest(kDefaultTimeoutMs),
          rtcp_mode_(rtcp_mode),
          sent_rtp_(0),
          sent_rtcp_(0) {}

   private:
    // Drop every third RTP packet so the receiver generates feedback.
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      if (++sent_rtp_ % 3 == 0)
        return DROP_PACKET;

      return SEND_PACKET;
    }

    Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      ++sent_rtcp_;
      test::RtcpPacketParser parser;
      EXPECT_TRUE(parser.Parse(packet, length));

      // The receiver should never send sender reports.
      EXPECT_EQ(0, parser.sender_report()->num_packets());

      switch (rtcp_mode_) {
        case RtcpMode::kCompound:
          // TODO(holmer): We shouldn't send transport feedback alone if
          // compound RTCP is negotiated.
          if (parser.receiver_report()->num_packets() == 0 &&
              parser.transport_feedback()->num_packets() == 0) {
            ADD_FAILURE() << "Received RTCP packet without receiver report for "
                             "RtcpMode::kCompound.";
            observation_complete_.Set();
          }

          if (sent_rtcp_ >= kNumCompoundRtcpPacketsToObserve)
            observation_complete_.Set();

          break;
        case RtcpMode::kReducedSize:
          // A packet without a receiver report proves reduced-size RTCP.
          if (parser.receiver_report()->num_packets() == 0)
            observation_complete_.Set();
          break;
        case RtcpMode::kOff:
          RTC_NOTREACHED();
          break;
      }

      return SEND_PACKET;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].rtp.rtcp_mode = rtcp_mode_;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait())
          << (rtcp_mode_ == RtcpMode::kCompound
                  ? "Timed out before observing enough compound packets."
                  : "Timed out before receiving a non-compound RTCP packet.");
    }

    RtcpMode rtcp_mode_;
    rtc::CriticalSection crit_;
    // Must be protected since RTCP can be sent by both the process thread
    // and the pacer thread.
    int sent_rtp_ GUARDED_BY(&crit_);
    int sent_rtcp_ GUARDED_BY(&crit_);
  } test(rtcp_mode);

  RunBaseTest(&test);
}
| |
// Compound RTCP: every packet must include a receiver report.
TEST_F(EndToEndTest, UsesRtcpCompoundMode) {
  RespectsRtcpMode(RtcpMode::kCompound);
}

// Reduced-size RTCP: packets without receiver reports are allowed.
TEST_F(EndToEndTest, UsesRtcpReducedSizeMode) {
  RespectsRtcpMode(RtcpMode::kReducedSize);
}
| |
// Sets up a Call with multiple senders using different resolutions and SSRCs.
// Another Call is set up to receive all three of these with different
// renderers.
class MultiStreamTest {
 public:
  static constexpr size_t kNumStreams = 3;
  const uint8_t kVideoPayloadType = 124;
  const std::map<uint8_t, MediaType> payload_type_map_ = {
      {kVideoPayloadType, MediaType::VIDEO}};

  // Per-stream SSRC and resolution; populated in the constructor.
  struct CodecSettings {
    uint32_t ssrc;
    int width;
    int height;
  } codec_settings[kNumStreams];

  explicit MultiStreamTest(test::SingleThreadedTaskQueueForTesting* task_queue)
      : task_queue_(task_queue) {
    // TODO(sprang): Cleanup when msvc supports explicit initializers for array.
    codec_settings[0] = {1, 640, 480};
    codec_settings[1] = {2, 320, 240};
    codec_settings[2] = {3, 240, 160};
  }

  virtual ~MultiStreamTest() {}

  // Creates sender and receiver calls plus kNumStreams VP8 send/receive
  // stream pairs on the task queue, runs the subclass-defined Wait(), then
  // tears everything down (also on the task queue, in reverse order).
  void RunTest() {
    webrtc::RtcEventLogNullImpl event_log;
    Call::Config config(&event_log);
    std::unique_ptr<Call> sender_call;
    std::unique_ptr<Call> receiver_call;
    std::unique_ptr<test::DirectTransport> sender_transport;
    std::unique_ptr<test::DirectTransport> receiver_transport;

    VideoSendStream* send_streams[kNumStreams];
    VideoReceiveStream* receive_streams[kNumStreams];
    test::FrameGeneratorCapturer* frame_generators[kNumStreams];
    std::vector<std::unique_ptr<VideoDecoder>> allocated_decoders;
    std::unique_ptr<VideoEncoder> encoders[kNumStreams];

    task_queue_->SendTask([&]() {
      sender_call = rtc::WrapUnique(Call::Create(config));
      receiver_call = rtc::WrapUnique(Call::Create(config));
      sender_transport =
          rtc::WrapUnique(CreateSendTransport(task_queue_, sender_call.get()));
      receiver_transport = rtc::WrapUnique(
          CreateReceiveTransport(task_queue_, receiver_call.get()));

      sender_transport->SetReceiver(receiver_call->Receiver());
      receiver_transport->SetReceiver(sender_call->Receiver());

      for (size_t i = 0; i < kNumStreams; ++i)
        encoders[i].reset(VP8Encoder::Create());

      for (size_t i = 0; i < kNumStreams; ++i) {
        uint32_t ssrc = codec_settings[i].ssrc;
        int width = codec_settings[i].width;
        int height = codec_settings[i].height;

        VideoSendStream::Config send_config(sender_transport.get());
        send_config.rtp.ssrcs.push_back(ssrc);
        send_config.encoder_settings.encoder = encoders[i].get();
        send_config.encoder_settings.payload_name = "VP8";
        send_config.encoder_settings.payload_type = kVideoPayloadType;
        VideoEncoderConfig encoder_config;
        test::FillEncoderConfiguration(1, &encoder_config);
        encoder_config.max_bitrate_bps = 100000;

        // Note: the capturer pointed at by frame_generators[i] is created
        // further down, after this hook runs (see UpdateSendConfig comment).
        UpdateSendConfig(i, &send_config, &encoder_config,
                         &frame_generators[i]);

        send_streams[i] = sender_call->CreateVideoSendStream(
            send_config.Copy(), encoder_config.Copy());
        send_streams[i]->Start();

        VideoReceiveStream::Config receive_config(receiver_transport.get());
        receive_config.rtp.remote_ssrc = ssrc;
        receive_config.rtp.local_ssrc = test::CallTest::kReceiverLocalVideoSsrc;
        VideoReceiveStream::Decoder decoder =
            test::CreateMatchingDecoder(send_config.encoder_settings);
        allocated_decoders.push_back(
            std::unique_ptr<VideoDecoder>(decoder.decoder));
        receive_config.decoders.push_back(decoder);

        UpdateReceiveConfig(i, &receive_config);

        receive_streams[i] =
            receiver_call->CreateVideoReceiveStream(std::move(receive_config));
        receive_streams[i]->Start();

        frame_generators[i] = test::FrameGeneratorCapturer::Create(
            width, height, 30, Clock::GetRealTimeClock());
        send_streams[i]->SetSource(
            frame_generators[i],
            VideoSendStream::DegradationPreference::kMaintainFramerate);
        frame_generators[i]->Start();
      }
    });

    Wait();

    task_queue_->SendTask([&]() {
      for (size_t i = 0; i < kNumStreams; ++i) {
        frame_generators[i]->Stop();
        sender_call->DestroyVideoSendStream(send_streams[i]);
        receiver_call->DestroyVideoReceiveStream(receive_streams[i]);
        delete frame_generators[i];
      }

      sender_transport.reset();
      receiver_transport.reset();

      sender_call.reset();
      receiver_call.reset();
    });
  }

 protected:
  // Blocks until the subclass's observation criteria are met.
  virtual void Wait() = 0;
  // Note: frame_generator is a point-to-pointer, since the actual instance
  // hasn't been created at the time of this call. Only when packets/frames
  // start flowing should this be dereferenced.
  virtual void UpdateSendConfig(
      size_t stream_index,
      VideoSendStream::Config* send_config,
      VideoEncoderConfig* encoder_config,
      test::FrameGeneratorCapturer** frame_generator) {}
  virtual void UpdateReceiveConfig(size_t stream_index,
                                   VideoReceiveStream::Config* receive_config) {
  }
  // Subclasses may override to observe or modify outgoing/incoming packets.
  virtual test::DirectTransport* CreateSendTransport(
      test::SingleThreadedTaskQueueForTesting* task_queue,
      Call* sender_call) {
    return new test::DirectTransport(task_queue, sender_call,
                                     payload_type_map_);
  }
  virtual test::DirectTransport* CreateReceiveTransport(
      test::SingleThreadedTaskQueueForTesting* task_queue,
      Call* receiver_call) {
    return new test::DirectTransport(task_queue, receiver_call,
                                     payload_type_map_);
  }

  test::SingleThreadedTaskQueueForTesting* const task_queue_;
};
| |
| // Each renderer verifies that it receives the expected resolution, and as soon |
| // as every renderer has received a frame, the test finishes. |
| TEST_F(EndToEndTest, SendsAndReceivesMultipleStreams) { |
| class VideoOutputObserver : public rtc::VideoSinkInterface<VideoFrame> { |
| public: |
| VideoOutputObserver(const MultiStreamTest::CodecSettings& settings, |
| uint32_t ssrc, |
| test::FrameGeneratorCapturer** frame_generator) |
| : settings_(settings), |
| ssrc_(ssrc), |
| frame_generator_(frame_generator), |
| done_(false, false) {} |
| |
| void OnFrame(const VideoFrame& video_frame) override { |
| EXPECT_EQ(settings_.width, video_frame.width()); |
| EXPECT_EQ(settings_.height, video_frame.height()); |
| (*frame_generator_)->Stop(); |
| done_.Set(); |
| } |
| |
| uint32_t Ssrc() { return ssrc_; } |
| |
| bool Wait() { return done_.Wait(kDefaultTimeoutMs); } |
| |
| private: |
| const MultiStreamTest::CodecSettings& settings_; |
| const uint32_t ssrc_; |
| test::FrameGeneratorCapturer** const frame_generator_; |
| rtc::Event done_; |
| }; |
| |
| class Tester : public MultiStreamTest { |
| public: |
| explicit Tester(test::SingleThreadedTaskQueueForTesting* task_queue) |
| : MultiStreamTest(task_queue) {} |
| virtual ~Tester() {} |
| |
| protected: |
| void Wait() override { |
| for (const auto& observer : observers_) { |
| EXPECT_TRUE(observer->Wait()) << "Time out waiting for from on ssrc " |
| << observer->Ssrc(); |
| } |
| } |
| |
| void UpdateSendConfig( |
| size_t stream_index, |
| VideoSendStream::Config* send_config, |
| VideoEncoderConfig* encoder_config, |
| test::FrameGeneratorCapturer** frame_generator) override { |
| observers_[stream_index].reset(new VideoOutputObserver( |
| codec_settings[stream_index], send_config->rtp.ssrcs.front(), |
| frame_generator)); |
| } |
| |
| void UpdateReceiveConfig( |
| size_t stream_index, |
| VideoReceiveStream::Config* receive_config) override { |
| receive_config->renderer = observers_[stream_index].get(); |
| } |
| |
| private: |
| std::unique_ptr<VideoOutputObserver> observers_[kNumStreams]; |
| } tester(&task_queue_); |
| |
| tester.RunTest(); |
| } |
| |
| TEST_F(EndToEndTest, AssignsTransportSequenceNumbers) { |
| static const int kExtensionId = 5; |
| |
  // Intercepts outgoing RTP: verifies every packet carries a transport
  // sequence number matching its PacketOptions, drops some media packets to
  // provoke retransmissions, and classifies RTX packets as retransmission vs.
  // padding. Completes once all streams, a retransmission, and RTX padding
  // have been observed with a gapless transport sequence number range.
  class RtpExtensionHeaderObserver : public test::DirectTransport {
   public:
    RtpExtensionHeaderObserver(
        test::SingleThreadedTaskQueueForTesting* task_queue,
        Call* sender_call,
        const uint32_t& first_media_ssrc,
        const std::map<uint32_t, uint32_t>& ssrc_map,
        const std::map<uint8_t, MediaType>& payload_type_map)
        : DirectTransport(task_queue, sender_call, payload_type_map),
          done_(false, false),
          parser_(RtpHeaderParser::Create()),
          first_media_ssrc_(first_media_ssrc),
          rtx_to_media_ssrcs_(ssrc_map),
          padding_observed_(false),
          rtx_padding_observed_(false),
          retransmit_observed_(false),
          started_(false) {
      parser_->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber,
                                          kExtensionId);
    }
    virtual ~RtpExtensionHeaderObserver() {}

    bool SendRtp(const uint8_t* data,
                 size_t length,
                 const PacketOptions& options) override {
      {
        rtc::CritScope cs(&lock_);

        // Once done, swallow all further packets.
        if (IsDone())
          return false;

        if (started_) {
          RTPHeader header;
          EXPECT_TRUE(parser_->Parse(data, length, &header));
          bool drop_packet = false;

          EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
          EXPECT_EQ(options.packet_id,
                    header.extension.transportSequenceNumber);
          if (!streams_observed_.empty()) {
            // Unwrap packet id and verify uniqueness.
            int64_t packet_id = unwrapper_.Unwrap(options.packet_id);
            EXPECT_TRUE(received_packed_ids_.insert(packet_id).second);
          }

          // Drop (up to) every 17th packet, so we get retransmits.
          // Only drop media, and not on the first stream (otherwise it will be
          // hard to distinguish from padding, which is always sent on the first
          // stream).
          if (header.payloadType != kSendRtxPayloadType &&
              header.ssrc != first_media_ssrc_ &&
              header.extension.transportSequenceNumber % 17 == 0) {
            dropped_seq_[header.ssrc].insert(header.sequenceNumber);
            drop_packet = true;
          }

          if (header.payloadType == kSendRtxPayloadType) {
            // RTX payload starts with the original sequence number; if it
            // matches a dropped packet this is a retransmission, otherwise
            // it is RTX payload padding.
            uint16_t original_sequence_number =
                ByteReader<uint16_t>::ReadBigEndian(&data[header.headerLength]);
            uint32_t original_ssrc =
                rtx_to_media_ssrcs_.find(header.ssrc)->second;
            std::set<uint16_t>* seq_no_map = &dropped_seq_[original_ssrc];
            auto it = seq_no_map->find(original_sequence_number);
            if (it != seq_no_map->end()) {
              retransmit_observed_ = true;
              seq_no_map->erase(it);
            } else {
              rtx_padding_observed_ = true;
            }
          } else {
            streams_observed_.insert(header.ssrc);
          }

          if (IsDone())
            done_.Set();

          // Dropped packets still return true: the send "succeeded" from the
          // caller's perspective, the packet just never reaches the receiver.
          if (drop_packet)
            return true;
        }
      }

      return test::DirectTransport::SendRtp(data, length, options);
    }

    bool IsDone() {
      bool observed_types_ok =
          streams_observed_.size() == MultiStreamTest::kNumStreams &&
          retransmit_observed_ && rtx_padding_observed_;
      if (!observed_types_ok)
        return false;
      // We should not have any gaps in the sequence number range.
      size_t seqno_range =
          *received_packed_ids_.rbegin() - *received_packed_ids_.begin() + 1;
      return seqno_range == received_packed_ids_.size();
    }

    bool Wait() {
      {
        // Can't be sure until this point that rtx_to_media_ssrcs_ etc have
        // been initialized and are OK to read.
        rtc::CritScope cs(&lock_);
        started_ = true;
      }
      return done_.Wait(kDefaultTimeoutMs);
    }

    rtc::CriticalSection lock_;
    rtc::Event done_;
    std::unique_ptr<RtpHeaderParser> parser_;
    SequenceNumberUnwrapper unwrapper_;
    std::set<int64_t> received_packed_ids_;
    std::set<uint32_t> streams_observed_;
    // Per media SSRC: sequence numbers of packets we dropped, awaiting RTX.
    std::map<uint32_t, std::set<uint16_t>> dropped_seq_;
    const uint32_t& first_media_ssrc_;
    const std::map<uint32_t, uint32_t>& rtx_to_media_ssrcs_;
    // NOTE(review): padding_observed_ is never set nor checked in this class
    // -- looks like dead state; confirm before removing.
    bool padding_observed_;
    bool rtx_padding_observed_;
    bool retransmit_observed_;
    bool started_;
  };
| |
  // Enables the transport sequence number RTP header extension on every
  // stream, forces padding traffic (sent as RTX), and installs the
  // RtpExtensionHeaderObserver send transport that validates the extension
  // on the wire.
  class TransportSequenceNumberTester : public MultiStreamTest {
   public:
    explicit TransportSequenceNumberTester(
        test::SingleThreadedTaskQueueForTesting* task_queue)
        : MultiStreamTest(task_queue),
          first_media_ssrc_(0),
          observer_(nullptr) {}
    virtual ~TransportSequenceNumberTester() {}

   protected:
    // Blocks until the observer has seen all streams and packet types.
    void Wait() override {
      RTC_DCHECK(observer_);
      EXPECT_TRUE(observer_->Wait());
    }

    void UpdateSendConfig(
        size_t stream_index,
        VideoSendStream::Config* send_config,
        VideoEncoderConfig* encoder_config,
        test::FrameGeneratorCapturer** frame_generator) override {
      send_config->rtp.extensions.clear();
      send_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kTransportSequenceNumberUri, kExtensionId));

      // Force some padding to be sent. Note that since we do send media
      // packets we can not guarantee that a padding only packet is sent.
      // Instead, padding will most likely be send as an RTX packet.
      const int kPaddingBitrateBps = 50000;
      encoder_config->max_bitrate_bps = 200000;
      encoder_config->min_transmit_bitrate_bps =
          encoder_config->max_bitrate_bps + kPaddingBitrateBps;

      // Configure RTX for redundant payload padding.
      send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[stream_index]);
      send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
      rtx_to_media_ssrcs_[kSendRtxSsrcs[stream_index]] =
          send_config->rtp.ssrcs[0];

      // Remember the first stream's media SSRC; the observer keys off it.
      if (stream_index == 0)
        first_media_ssrc_ = send_config->rtp.ssrcs[0];
    }

    void UpdateReceiveConfig(
        size_t stream_index,
        VideoReceiveStream::Config* receive_config) override {
      receive_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      receive_config->rtp.extensions.clear();
      receive_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kTransportSequenceNumberUri, kExtensionId));
      receive_config->renderer = &fake_renderer_;
    }

    // Creates the observing send transport. The RTX payload type is routed
    // as video so the observer can parse RTX packets too.
    test::DirectTransport* CreateSendTransport(
        test::SingleThreadedTaskQueueForTesting* task_queue,
        Call* sender_call) override {
      std::map<uint8_t, MediaType> payload_type_map =
          MultiStreamTest::payload_type_map_;
      RTC_DCHECK(payload_type_map.find(kSendRtxPayloadType) ==
                 payload_type_map.end());
      payload_type_map[kSendRtxPayloadType] = MediaType::VIDEO;
      observer_ = new RtpExtensionHeaderObserver(
          task_queue, sender_call, first_media_ssrc_, rtx_to_media_ssrcs_,
          payload_type_map);
      return observer_;
    }

   private:
    test::FakeVideoRenderer fake_renderer_;
    uint32_t first_media_ssrc_;
    std::map<uint32_t, uint32_t> rtx_to_media_ssrcs_;
    RtpExtensionHeaderObserver* observer_;
  } tester(&task_queue_);
| |
| tester.RunTest(); |
| } |
| |
// Verifies whether RTCP transport feedback is (or is not) produced,
// depending on the transport_cc receive configuration, for audio and/or
// video streams. The receiver is expected to send transport feedback iff
// feedback_enabled_ is true.
class TransportFeedbackTester : public test::EndToEndTest {
 public:
  TransportFeedbackTester(bool feedback_enabled,
                          size_t num_video_streams,
                          size_t num_audio_streams)
      : EndToEndTest(::webrtc::EndToEndTest::kDefaultTimeoutMs),
        feedback_enabled_(feedback_enabled),
        num_video_streams_(num_video_streams),
        num_audio_streams_(num_audio_streams),
        receiver_call_(nullptr) {
    // Only one stream of each supported for now.
    EXPECT_LE(num_video_streams, 1u);
    EXPECT_LE(num_audio_streams, 1u);
  }

 protected:
  // The sender must never produce transport feedback of its own.
  Action OnSendRtcp(const uint8_t* data, size_t length) override {
    EXPECT_FALSE(HasTransportFeedback(data, length));
    return SEND_PACKET;
  }

  Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
    if (HasTransportFeedback(data, length))
      observation_complete_.Set();
    return SEND_PACKET;
  }

  bool HasTransportFeedback(const uint8_t* data, size_t length) const {
    test::RtcpPacketParser parser;
    EXPECT_TRUE(parser.Parse(data, length));
    return parser.transport_feedback()->num_packets() > 0;
  }

  // With feedback enabled: wait (up to default timeout) for feedback to
  // arrive. With feedback disabled: wait a fixed period and require that no
  // feedback was observed.
  void PerformTest() override {
    const int64_t kDisabledFeedbackTimeoutMs = 5000;
    EXPECT_EQ(feedback_enabled_,
              observation_complete_.Wait(feedback_enabled_
                                             ? test::CallTest::kDefaultTimeoutMs
                                             : kDisabledFeedbackTimeoutMs));
  }

  void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
    receiver_call_ = receiver_call;
  }

  size_t GetNumVideoStreams() const override { return num_video_streams_; }
  size_t GetNumAudioStreams() const override { return num_audio_streams_; }

  void ModifyVideoConfigs(
      VideoSendStream::Config* send_config,
      std::vector<VideoReceiveStream::Config>* receive_configs,
      VideoEncoderConfig* encoder_config) override {
    (*receive_configs)[0].rtp.transport_cc = feedback_enabled_;
  }

  void ModifyAudioConfigs(
      AudioSendStream::Config* send_config,
      std::vector<AudioReceiveStream::Config>* receive_configs) override {
    // Audio needs the transport sequence number extension configured
    // explicitly; the receiver mirrors the sender's extensions.
    send_config->rtp.extensions.clear();
    send_config->rtp.extensions.push_back(
        RtpExtension(RtpExtension::kTransportSequenceNumberUri, kExtensionId));
    (*receive_configs)[0].rtp.extensions.clear();
    (*receive_configs)[0].rtp.extensions = send_config->rtp.extensions;
    (*receive_configs)[0].rtp.transport_cc = feedback_enabled_;
  }

 private:
  static const int kExtensionId = 5;
  const bool feedback_enabled_;
  const size_t num_video_streams_;
  const size_t num_audio_streams_;
  Call* receiver_call_;  // Set in OnCallsCreated; currently unused otherwise.
};
| |
// Receiver sends transport feedback when transport_cc is enabled (video).
TEST_F(EndToEndTest, VideoReceivesTransportFeedback) {
  TransportFeedbackTester test(true, 1, 0);
  RunBaseTest(&test);
}
| |
// No transport feedback is sent when transport_cc is disabled (video).
TEST_F(EndToEndTest, VideoTransportFeedbackNotConfigured) {
  TransportFeedbackTester test(false, 1, 0);
  RunBaseTest(&test);
}
| |
// Receiver sends transport feedback when transport_cc is enabled (audio).
TEST_F(EndToEndTest, AudioReceivesTransportFeedback) {
  TransportFeedbackTester test(true, 0, 1);
  RunBaseTest(&test);
}
| |
// No transport feedback is sent when transport_cc is disabled (audio).
TEST_F(EndToEndTest, AudioTransportFeedbackNotConfigured) {
  TransportFeedbackTester test(false, 0, 1);
  RunBaseTest(&test);
}
| |
// Transport feedback also works with simultaneous audio and video streams.
TEST_F(EndToEndTest, AudioVideoReceivesTransportFeedback) {
  TransportFeedbackTester test(true, 1, 1);
  RunBaseTest(&test);
}
| |
// Verifies that with the congestion-window field trial enabled, the sender
// stops sending (unacknowledged) media once transport feedback is withheld,
// falling back to padding-only packets instead.
TEST_F(EndToEndTest, StopsSendingMediaWithoutFeedback) {
  // Cap the congestion window via the field trial (250 ms of data).
  test::ScopedFieldTrials override_field_trials(
      "WebRTC-CwndExperiment/Enabled-250/");

  class TransportFeedbackTester : public test::EndToEndTest {
   public:
    TransportFeedbackTester(size_t num_video_streams, size_t num_audio_streams)
        : EndToEndTest(::webrtc::EndToEndTest::kDefaultTimeoutMs),
          num_video_streams_(num_video_streams),
          num_audio_streams_(num_audio_streams),
          media_sent_(0),
          padding_sent_(0) {
      // Only one stream of each supported for now.
      EXPECT_LE(num_video_streams, 1u);
      EXPECT_LE(num_audio_streams, 1u);
    }

   protected:
    // Counts outgoing media vs. padding-only packets; fails the test if
    // media keeps flowing long after feedback has been cut off.
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));
      const bool only_padding =
          header.headerLength + header.paddingLength == length;
      rtc::CritScope lock(&crit_);
      if (only_padding) {
        ++padding_sent_;
      } else {
        ++media_sent_;
        EXPECT_LT(media_sent_, 40) << "Media sent without feedback.";
      }

      return SEND_PACKET;
    }

    // Lets feedback through until 20 media packets have been sent, then
    // drops all transport feedback to starve the send side.
    Action OnReceiveRtcp(const uint8_t* data, size_t length) override {
      rtc::CritScope lock(&crit_);
      if (media_sent_ > 20 && HasTransportFeedback(data, length)) {
        return DROP_PACKET;
      }
      return SEND_PACKET;
    }

    bool HasTransportFeedback(const uint8_t* data, size_t length) const {
      test::RtcpPacketParser parser;
      EXPECT_TRUE(parser.Parse(data, length));
      return parser.transport_feedback()->num_packets() > 0;
    }

    Call::Config GetSenderCallConfig() override {
      Call::Config config = EndToEndTest::GetSenderCallConfig();
      config.bitrate_config.max_bitrate_bps = 300000;
      return config;
    }

    // Runs for a fixed period (there is no success event to wait for) and
    // then checks that padding was sent while media was throttled.
    void PerformTest() override {
      const int64_t kDisabledFeedbackTimeoutMs = 10000;
      observation_complete_.Wait(kDisabledFeedbackTimeoutMs);
      rtc::CritScope lock(&crit_);
      EXPECT_GT(padding_sent_, 0);
    }

    size_t GetNumVideoStreams() const override { return num_video_streams_; }
    size_t GetNumAudioStreams() const override { return num_audio_streams_; }

   private:
    const size_t num_video_streams_;
    const size_t num_audio_streams_;
    rtc::CriticalSection crit_;
    int media_sent_ GUARDED_BY(crit_);
    int padding_sent_ GUARDED_BY(crit_);
  } test(1, 0);
  RunBaseTest(&test);
}
| |
// Verifies that the send-side post-encode callback and the receive-side
// pre-decode callback observe the same encoded frame: same frame type,
// length and payload bytes.
TEST_F(EndToEndTest, ObserversEncodedFrames) {
  class EncodedFrameTestObserver : public EncodedFrameObserver {
   public:
    EncodedFrameTestObserver()
        : length_(0), frame_type_(kEmptyFrame), called_(false, false) {}
    virtual ~EncodedFrameTestObserver() {}

    // Copies the encoded frame so it can be compared after both observers
    // have fired.
    virtual void EncodedFrameCallback(const EncodedFrame& encoded_frame) {
      frame_type_ = encoded_frame.frame_type_;
      length_ = encoded_frame.length_;
      buffer_.reset(new uint8_t[length_]);
      memcpy(buffer_.get(), encoded_frame.data_, length_);
      called_.Set();
    }

    bool Wait() { return called_.Wait(kDefaultTimeoutMs); }

    void ExpectEqualFrames(const EncodedFrameTestObserver& observer) {
      ASSERT_EQ(length_, observer.length_)
          << "Observed frames are of different lengths.";
      EXPECT_EQ(frame_type_, observer.frame_type_)
          << "Observed frames have different frame types.";
      EXPECT_EQ(0, memcmp(buffer_.get(), observer.buffer_.get(), length_))
          << "Observed encoded frames have different content.";
    }

   private:
    std::unique_ptr<uint8_t[]> buffer_;
    size_t length_;
    FrameType frame_type_;
    rtc::Event called_;
  };

  EncodedFrameTestObserver post_encode_observer;
  EncodedFrameTestObserver pre_decode_observer;
  test::FrameForwarder forwarder;
  std::unique_ptr<test::FrameGenerator> frame_generator;

  std::unique_ptr<test::DirectTransport> sender_transport;
  std::unique_ptr<test::DirectTransport> receiver_transport;

  // All call/stream setup must run on the test task queue.
  task_queue_.SendTask([&]() {
    CreateCalls(Call::Config(event_log_.get()), Call::Config(event_log_.get()));

    sender_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, sender_call_.get(), payload_type_map_);
    receiver_transport = rtc::MakeUnique<test::DirectTransport>(
        &task_queue_, receiver_call_.get(), payload_type_map_);
    sender_transport->SetReceiver(receiver_call_->Receiver());
    receiver_transport->SetReceiver(sender_call_->Receiver());

    CreateSendConfig(1, 0, 0, sender_transport.get());
    CreateMatchingReceiveConfigs(receiver_transport.get());
    video_send_config_.post_encode_callback = &post_encode_observer;
    video_receive_configs_[0].pre_decode_callback = &pre_decode_observer;

    CreateVideoStreams();
    Start();

    // Feed a single generated frame into the send stream.
    frame_generator = test::FrameGenerator::CreateSquareGenerator(
        kDefaultWidth, kDefaultHeight);
    video_send_stream_->SetSource(
        &forwarder, VideoSendStream::DegradationPreference::kMaintainFramerate);
    forwarder.IncomingCapturedFrame(*frame_generator->NextFrame());
  });

  EXPECT_TRUE(post_encode_observer.Wait())
      << "Timed out while waiting for send-side encoded-frame callback.";

  EXPECT_TRUE(pre_decode_observer.Wait())
      << "Timed out while waiting for pre-decode encoded-frame callback.";

  post_encode_observer.ExpectEqualFrames(pre_decode_observer);

  // Teardown must also run on the task queue.
  task_queue_.SendTask([this, &sender_transport, &receiver_transport]() {
    Stop();
    DestroyStreams();
    sender_transport.reset();
    receiver_transport.reset();
    DestroyCalls();
  });
}
| |
// Verifies that a receive stream configured with remb=true emits RTCP REMB
// packets carrying a non-zero bitrate estimate for the sending SSRC.
TEST_F(EndToEndTest, ReceiveStreamSendsRemb) {
  class RembObserver : public test::EndToEndTest {
   public:
    RembObserver() : EndToEndTest(kDefaultTimeoutMs) {}

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      // Receive-side BWE configuration: abs-send-time + REMB, with
      // transport-cc disabled.
      send_config->rtp.extensions.clear();
      send_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
      (*receive_configs)[0].rtp.remb = true;
      (*receive_configs)[0].rtp.transport_cc = false;
    }

    Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
      test::RtcpPacketParser parser;
      EXPECT_TRUE(parser.Parse(packet, length));

      // Accept the first REMB that targets the video send SSRC with a
      // positive bitrate.
      if (parser.remb()->num_packets() > 0) {
        EXPECT_EQ(kReceiverLocalVideoSsrc, parser.remb()->sender_ssrc());
        EXPECT_LT(0U, parser.remb()->bitrate_bps());
        EXPECT_EQ(1U, parser.remb()->ssrcs().size());
        EXPECT_EQ(kVideoSendSsrcs[0], parser.remb()->ssrcs()[0]);
        observation_complete_.Set();
      }

      return SEND_PACKET;
    }
    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for a "
                             "receiver RTCP REMB packet to be "
                             "sent.";
    }
  } test;

  RunBaseTest(&test);
}
| |
// Waits until the calls report non-zero bandwidth estimates and the pacer
// has reported a queueing delay. With send-side BWE only the sender's
// estimate is required; with receive-side BWE the receiver's estimate must
// also be non-zero.
class BandwidthStatsTest : public test::EndToEndTest {
 public:
  explicit BandwidthStatsTest(bool send_side_bwe)
      : EndToEndTest(test::CallTest::kDefaultTimeoutMs),
        sender_call_(nullptr),
        receiver_call_(nullptr),
        has_seen_pacer_delay_(false),
        send_side_bwe_(send_side_bwe) {}

  void ModifyVideoConfigs(
      VideoSendStream::Config* send_config,
      std::vector<VideoReceiveStream::Config>* receive_configs,
      VideoEncoderConfig* encoder_config) override {
    // For receive-side BWE, switch to abs-send-time + REMB and disable
    // transport-cc; the default configuration is send-side BWE.
    if (!send_side_bwe_) {
      send_config->rtp.extensions.clear();
      send_config->rtp.extensions.push_back(RtpExtension(
          RtpExtension::kAbsSendTimeUri, test::kAbsSendTimeExtensionId));
      (*receive_configs)[0].rtp.remb = true;
      (*receive_configs)[0].rtp.transport_cc = false;
    }
  }

  // Polls both calls' stats on every sent packet until the expected
  // bandwidth stats have been observed.
  Action OnSendRtp(const uint8_t* packet, size_t length) override {
    Call::Stats sender_stats = sender_call_->GetStats();
    Call::Stats receiver_stats = receiver_call_->GetStats();
    if (!has_seen_pacer_delay_)
      has_seen_pacer_delay_ = sender_stats.pacer_delay_ms > 0;
    if (sender_stats.send_bandwidth_bps > 0 && has_seen_pacer_delay_) {
      if (send_side_bwe_ || receiver_stats.recv_bandwidth_bps > 0)
        observation_complete_.Set();
    }
    return SEND_PACKET;
  }

  void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
    sender_call_ = sender_call;
    receiver_call_ = receiver_call;
  }

  void PerformTest() override {
    EXPECT_TRUE(Wait()) << "Timed out while waiting for "
                           "non-zero bandwidth stats.";
  }

 private:
  Call* sender_call_;
  Call* receiver_call_;
  bool has_seen_pacer_delay_;
  const bool send_side_bwe_;
};
| |
// Non-zero bandwidth stats must be reported with send-side BWE.
TEST_F(EndToEndTest, VerifySendSideBweStats) {
  BandwidthStatsTest test(true);
  RunBaseTest(&test);
}
| |
// Non-zero bandwidth stats must be reported with receive-side (REMB) BWE.
TEST_F(EndToEndTest, VerifyRecvSideBweStats) {
  BandwidthStatsTest test(false);
  RunBaseTest(&test);
}
| |
| // Verifies that it's possible to limit the send BWE by sending a REMB. |
| // This is verified by allowing the send BWE to ramp-up to >1000 kbps, |
| // then have the test generate a REMB of 500 kbps and verify that the send BWE |
| // is reduced to exactly 500 kbps. Then a REMB of 1000 kbps is generated and the |
| // test verifies that the send BWE ramps back up to exactly 1000 kbps. |
TEST_F(EndToEndTest, RembWithSendSideBwe) {
  class BweObserver : public test::EndToEndTest {
   public:
    BweObserver()
        : EndToEndTest(kDefaultTimeoutMs),
          sender_call_(nullptr),
          clock_(Clock::GetRealTimeClock()),
          sender_ssrc_(0),
          remb_bitrate_bps_(1000000),
          receive_transport_(nullptr),
          stop_event_(false, false),
          poller_thread_(&BitrateStatsPollingThread,
                         this,
                         "BitrateStatsPollingThread"),
          state_(kWaitForFirstRampUp),
          retransmission_rate_limiter_(clock_, 1000) {}

    ~BweObserver() {}

    test::PacketTransport* CreateReceiveTransport(
        test::SingleThreadedTaskQueueForTesting* task_queue) override {
      receive_transport_ = new test::PacketTransport(
          task_queue, nullptr, this, test::PacketTransport::kReceiver,
          payload_type_map_, FakeNetworkPipe::Config());
      return receive_transport_;
    }

    Call::Config GetSenderCallConfig() override {
      Call::Config config(event_log_.get());
      // Set a high start bitrate to reduce the test completion time.
      config.bitrate_config.start_bitrate_bps = remb_bitrate_bps_;
      return config;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      ASSERT_EQ(1u, send_config->rtp.ssrcs.size());
      sender_ssrc_ = send_config->rtp.ssrcs[0];

      encoder_config->max_bitrate_bps = 2000000;

      // Set up a bare receive-only RTP/RTCP module so the test can inject
      // its own REMB packets toward the sender.
      ASSERT_EQ(1u, receive_configs->size());
      RtpRtcp::Configuration config;
      config.receiver_only = true;
      config.clock = clock_;
      config.outgoing_transport = receive_transport_;
      config.retransmission_rate_limiter = &retransmission_rate_limiter_;
      rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(config));
      rtp_rtcp_->SetRemoteSSRC((*receive_configs)[0].rtp.remote_ssrc);
      rtp_rtcp_->SetSSRC((*receive_configs)[0].rtp.local_ssrc);
      rtp_rtcp_->SetREMBStatus(true);
      rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
    }

    void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
      sender_call_ = sender_call;
    }

    static void BitrateStatsPollingThread(void* obj) {
      static_cast<BweObserver*>(obj)->PollStats();
    }

    // Polls sender stats once per second, driving the three-step state
    // machine: ramp up to the start bitrate, halve it via REMB and wait for
    // the estimate to match exactly, then double it and wait for the ramp
    // back up.
    void PollStats() {
      do {
        if (sender_call_) {
          Call::Stats stats = sender_call_->GetStats();
          switch (state_) {
            case kWaitForFirstRampUp:
              if (stats.send_bandwidth_bps >= remb_bitrate_bps_) {
                state_ = kWaitForRemb;
                remb_bitrate_bps_ /= 2;
                rtp_rtcp_->SetREMBData(
                    remb_bitrate_bps_,
                    std::vector<uint32_t>(&sender_ssrc_, &sender_ssrc_ + 1));
                rtp_rtcp_->SendRTCP(kRtcpRr);
              }
              break;

            case kWaitForRemb:
              if (stats.send_bandwidth_bps == remb_bitrate_bps_) {
                state_ = kWaitForSecondRampUp;
                remb_bitrate_bps_ *= 2;
                rtp_rtcp_->SetREMBData(
                    remb_bitrate_bps_,
                    std::vector<uint32_t>(&sender_ssrc_, &sender_ssrc_ + 1));
                rtp_rtcp_->SendRTCP(kRtcpRr);
              }
              break;

            case kWaitForSecondRampUp:
              if (stats.send_bandwidth_bps == remb_bitrate_bps_) {
                observation_complete_.Set();
              }
              break;
          }
        }
      } while (!stop_event_.Wait(1000));
    }

    void PerformTest() override {
      poller_thread_.Start();
      EXPECT_TRUE(Wait())
          << "Timed out while waiting for bitrate to change according to REMB.";
      stop_event_.Set();
      poller_thread_.Stop();
    }

   private:
    enum TestState { kWaitForFirstRampUp, kWaitForRemb, kWaitForSecondRampUp };

    Call* sender_call_;
    Clock* const clock_;
    uint32_t sender_ssrc_;
    int remb_bitrate_bps_;
    std::unique_ptr<RtpRtcp> rtp_rtcp_;
    test::PacketTransport* receive_transport_;
    rtc::Event stop_event_;
    rtc::PlatformThread poller_thread_;
    TestState state_;
    RateLimiter retransmission_rate_limiter_;
  } test;

  RunBaseTest(&test);
}
| |
// Verifies that key frame requests (PLI) stop once the send stream is
// inactive: after stopping the sender, the receiver is expected to have
// issued exactly one PLI by the end of a 10 second observation window.
TEST_F(EndToEndTest, StopSendingKeyframeRequestsForInactiveStream) {
  class KeyframeRequestObserver : public test::EndToEndTest {
   public:
    explicit KeyframeRequestObserver(
        test::SingleThreadedTaskQueueForTesting* task_queue)
        : clock_(Clock::GetRealTimeClock()), task_queue_(task_queue) {}

    void OnVideoStreamsCreated(
        VideoSendStream* send_stream,
        const std::vector<VideoReceiveStream*>& receive_streams) override {
      RTC_DCHECK_EQ(1, receive_streams.size());
      send_stream_ = send_stream;
      receive_stream_ = receive_streams[0];
    }

    void PerformTest() override {
      // Wait (up to 5 s) for the first frame to be decoded.
      bool frame_decoded = false;
      int64_t start_time = clock_->TimeInMilliseconds();
      while (clock_->TimeInMilliseconds() - start_time <= 5000) {
        if (receive_stream_->GetStats().frames_decoded > 0) {
          frame_decoded = true;
          break;
        }
        SleepMs(100);
      }
      ASSERT_TRUE(frame_decoded);
      // Stop sending; further keyframe requests should cease.
      task_queue_->SendTask([this]() { send_stream_->Stop(); });
      SleepMs(10000);
      ASSERT_EQ(
          1U, receive_stream_->GetStats().rtcp_packet_type_counts.pli_packets);
    }

   private:
    Clock* clock_;
    VideoSendStream* send_stream_;
    VideoReceiveStream* receive_stream_;
    test::SingleThreadedTaskQueueForTesting* const task_queue_;
  } test(&task_queue_);

  RunBaseTest(&test);
}
| |
// Common base for the BWE probing tests below: configures the sender call
// with a fixed start bitrate and exposes the sender call plus a small
// state-machine counter to subclasses.
class ProbingTest : public test::EndToEndTest {
 public:
  explicit ProbingTest(int start_bitrate_bps)
      : clock_(Clock::GetRealTimeClock()),
        start_bitrate_bps_(start_bitrate_bps),
        state_(0),
        sender_call_(nullptr) {}

  ~ProbingTest() {}

  Call::Config GetSenderCallConfig() override {
    Call::Config config(event_log_.get());
    config.bitrate_config.start_bitrate_bps = start_bitrate_bps_;
    return config;
  }

  void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
    sender_call_ = sender_call;
  }

 protected:
  Clock* const clock_;
  const int start_bitrate_bps_;
  int state_;  // Subclass-defined state-machine position.
  Call* sender_call_;
};
| |
// Checks that initial BWE probing quickly ramps the estimate well above the
// configured 300 kbps start bitrate. Probing is timing sensitive, so the
// test is retried up to three times before being declared a failure.
TEST_F(EndToEndTest, MAYBE_InitialProbing) {
  class InitialProbingTest : public ProbingTest {
   public:
    explicit InitialProbingTest(bool* success)
        : ProbingTest(300000), success_(success) {
      *success_ = false;
    }

    void PerformTest() override {
      int64_t start_time_ms = clock_->TimeInMilliseconds();
      do {
        if (clock_->TimeInMilliseconds() - start_time_ms > kTimeoutMs)
          break;

        Call::Stats stats = sender_call_->GetStats();
        // Initial probing is done with a x3 and x6 multiplier of the start
        // bitrate, so a x4 multiplier is a high enough threshold.
        if (stats.send_bandwidth_bps > 4 * 300000) {
          *success_ = true;
          break;
        }
      } while (!observation_complete_.Wait(20));
    }

   private:
    const int kTimeoutMs = 1000;
    bool* const success_;
  };

  bool success = false;
  const int kMaxAttempts = 3;
  for (int i = 0; i < kMaxAttempts; ++i) {
    InitialProbingTest test(&success);
    RunBaseTest(&test);
    if (success)
      return;
  }
  EXPECT_TRUE(success) << "Failed to perform mid initial probing ("
                       << kMaxAttempts << " attempts).";
}
| |
// Fails on Linux MSan: bugs.webrtc.org/7428
#if defined(MEMORY_SANITIZER)
TEST_F(EndToEndTest, DISABLED_TriggerMidCallProbing) {
// Fails on iOS bots: bugs.webrtc.org/7851
#elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
TEST_F(EndToEndTest, DISABLED_TriggerMidCallProbing) {
#else
TEST_F(EndToEndTest, TriggerMidCallProbing) {
#endif

  // Verifies mid-call probing: after the initial ramp-up the bitrate is
  // capped at 100 kbps; lifting the cap to 2.5 Mbps should trigger probing
  // that pushes the estimate above 1.25 Mbps within the timeout. Probing is
  // timing sensitive, so the test retries up to three times.
  class TriggerMidCallProbingTest : public ProbingTest {
   public:
    TriggerMidCallProbingTest(
        test::SingleThreadedTaskQueueForTesting* task_queue,
        bool* success)
        : ProbingTest(300000), success_(success), task_queue_(task_queue) {}

    void PerformTest() override {
      *success_ = false;
      int64_t start_time_ms = clock_->TimeInMilliseconds();
      do {
        if (clock_->TimeInMilliseconds() - start_time_ms > kTimeoutMs)
          break;

        Call::Stats stats = sender_call_->GetStats();

        switch (state_) {
          case 0:
            // Wait for the initial ramp-up, then cap the bitrate.
            if (stats.send_bandwidth_bps > 5 * 300000) {
              Call::Config::BitrateConfig bitrate_config;
              bitrate_config.max_bitrate_bps = 100000;
              task_queue_->SendTask([this, &bitrate_config]() {
                sender_call_->SetBitrateConfig(bitrate_config);
              });
              ++state_;
            }
            break;
          case 1:
            // Once throttled, lift the cap to trigger mid-call probing.
            if (stats.send_bandwidth_bps < 110000) {
              Call::Config::BitrateConfig bitrate_config;
              bitrate_config.max_bitrate_bps = 2500000;
              task_queue_->SendTask([this, &bitrate_config]() {
                sender_call_->SetBitrateConfig(bitrate_config);
              });
              ++state_;
            }
            break;
          case 2:
            // During high cpu load the pacer will not be able to pace packets
            // at the correct speed, but if we go from 110 to 1250 kbps
            // in 5 seconds then it is due to probing.
            if (stats.send_bandwidth_bps > 1250000) {
              *success_ = true;
              observation_complete_.Set();
            }
            break;
        }
      } while (!observation_complete_.Wait(20));
    }

   private:
    const int kTimeoutMs = 5000;
    bool* const success_;
    test::SingleThreadedTaskQueueForTesting* const task_queue_;
  };

  bool success = false;
  const int kMaxAttempts = 3;
  for (int i = 0; i < kMaxAttempts; ++i) {
    TriggerMidCallProbingTest test(&task_queue_, &success);
    RunBaseTest(&test);
    if (success)
      return;
  }
  EXPECT_TRUE(success) << "Failed to perform mid call probing (" << kMaxAttempts
                       << " attempts).";
}
| |
// Drops a single RTP packet, verifies that the receiver NACKs it, that the
// NACK is reflected in both send- and receive-side stream stats, and that
// the NACK-related UMA histograms are populated.
TEST_F(EndToEndTest, VerifyNackStats) {
  static const int kPacketNumberToDrop = 200;
  class NackObserver : public test::EndToEndTest {
   public:
    NackObserver()
        : EndToEndTest(kLongTimeoutMs),
          sent_rtp_packets_(0),
          dropped_rtp_packet_(0),
          dropped_rtp_packet_requested_(false),
          send_stream_(nullptr),
          start_runtime_ms_(-1) {}

   private:
    // Drops exactly one packet (the kPacketNumberToDrop:th) and records its
    // sequence number so the corresponding NACK can be recognized.
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      if (++sent_rtp_packets_ == kPacketNumberToDrop) {
        std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
        RTPHeader header;
        EXPECT_TRUE(parser->Parse(packet, length, &header));
        dropped_rtp_packet_ = header.sequenceNumber;
        return DROP_PACKET;
      }
      VerifyStats();
      return SEND_PACKET;
    }

    // Watches receiver RTCP for a NACK naming the dropped packet.
    Action OnReceiveRtcp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      test::RtcpPacketParser rtcp_parser;
      rtcp_parser.Parse(packet, length);
      const std::vector<uint16_t>& nacks = rtcp_parser.nack()->packet_ids();
      if (!nacks.empty() && std::find(
          nacks.begin(), nacks.end(), dropped_rtp_packet_) != nacks.end()) {
        dropped_rtp_packet_requested_ = true;
      }
      return SEND_PACKET;
    }

    // Ends the test once at least one NACK shows up in both the send- and
    // receive-side stats, and the minimum metrics runtime has elapsed.
    void VerifyStats() EXCLUSIVE_LOCKS_REQUIRED(&crit_) {
      if (!dropped_rtp_packet_requested_)
        return;
      int send_stream_nack_packets = 0;
      int receive_stream_nack_packets = 0;
      VideoSendStream::Stats stats = send_stream_->GetStats();
      for (std::map<uint32_t, VideoSendStream::StreamStats>::const_iterator it =
          stats.substreams.begin(); it != stats.substreams.end(); ++it) {
        const VideoSendStream::StreamStats& stream_stats = it->second;
        send_stream_nack_packets +=
            stream_stats.rtcp_packet_type_counts.nack_packets;
      }
      for (size_t i = 0; i < receive_streams_.size(); ++i) {
        VideoReceiveStream::Stats stats = receive_streams_[i]->GetStats();
        receive_stream_nack_packets +=
            stats.rtcp_packet_type_counts.nack_packets;
      }
      if (send_stream_nack_packets >= 1 && receive_stream_nack_packets >= 1) {
        // NACK packet sent on receive stream and received on sent stream.
        if (MinMetricRunTimePassed())
          observation_complete_.Set();
      }
    }

    // Histograms are only recorded after a minimum runtime; the first call
    // just starts the clock.
    bool MinMetricRunTimePassed() {
      int64_t now = Clock::GetRealTimeClock()->TimeInMilliseconds();
      if (start_runtime_ms_ == -1) {
        start_runtime_ms_ = now;
        return false;
      }
      int64_t elapsed_sec = (now - start_runtime_ms_) / 1000;
      return elapsed_sec > metrics::kMinRunTimeInSeconds;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].renderer = &fake_renderer_;
    }

    void OnVideoStreamsCreated(
        VideoSendStream* send_stream,
        const std::vector<VideoReceiveStream*>& receive_streams) override {
      send_stream_ = send_stream;
      receive_streams_ = receive_streams;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out waiting for packet to be NACKed.";
    }

    test::FakeVideoRenderer fake_renderer_;
    rtc::CriticalSection crit_;
    uint64_t sent_rtp_packets_;
    uint16_t dropped_rtp_packet_ GUARDED_BY(&crit_);
    bool dropped_rtp_packet_requested_ GUARDED_BY(&crit_);
    std::vector<VideoReceiveStream*> receive_streams_;
    VideoSendStream* send_stream_;
    int64_t start_runtime_ms_;
  } test;

  metrics::Reset();
  RunBaseTest(&test);

  // The NACK histograms must have been updated exactly once each.
  EXPECT_EQ(
      1, metrics::NumSamples("WebRTC.Video.UniqueNackRequestsSentInPercent"));
  EXPECT_EQ(1, metrics::NumSamples(
                   "WebRTC.Video.UniqueNackRequestsReceivedInPercent"));
  EXPECT_GT(metrics::MinSample("WebRTC.Video.NackPacketsSentPerMinute"), 0);
}
| |
| void EndToEndTest::VerifyHistogramStats(bool use_rtx, |
| bool use_red, |
| bool screenshare) { |
  // Generates traffic (optionally with RTX and RED/ULPFEC, realtime or
  // screenshare content) until the minimum metrics runtime has passed and
  // enough frames have been rendered for histogram samples to be recorded.
  class StatsObserver : public test::EndToEndTest,
                        public rtc::VideoSinkInterface<VideoFrame> {
   public:
    StatsObserver(bool use_rtx, bool use_red, bool screenshare)
        : EndToEndTest(kLongTimeoutMs),
          use_rtx_(use_rtx),
          use_red_(use_red),
          screenshare_(screenshare),
          // This test uses NACK, so to send FEC we can't use a fake encoder.
          vp8_encoder_(use_red ? VP8Encoder::Create() : nullptr),
          sender_call_(nullptr),
          receiver_call_(nullptr),
          start_runtime_ms_(-1),
          num_frames_received_(0) {}

   private:
    void OnFrame(const VideoFrame& video_frame) override {
      // The RTT is needed to estimate |ntp_time_ms| which is used by
      // end-to-end delay stats. Therefore, start counting received frames once
      // |ntp_time_ms| is valid.
      if (video_frame.ntp_time_ms() > 0 &&
          Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >=
              video_frame.ntp_time_ms()) {
        rtc::CritScope lock(&crit_);
        ++num_frames_received_;
      }
    }

    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      if (MinMetricRunTimePassed() && MinNumberOfFramesReceived())
        observation_complete_.Set();

      return SEND_PACKET;
    }

    // True once twice the minimum metrics runtime has elapsed since the
    // first call; the first call only starts the clock.
    bool MinMetricRunTimePassed() {
      int64_t now = Clock::GetRealTimeClock()->TimeInMilliseconds();
      if (start_runtime_ms_ == -1) {
        start_runtime_ms_ = now;
        return false;
      }
      int64_t elapsed_sec = (now - start_runtime_ms_) / 1000;
      return elapsed_sec > metrics::kMinRunTimeInSeconds * 2;
    }

    bool MinNumberOfFramesReceived() const {
      const int kMinRequiredHistogramSamples = 200;
      rtc::CritScope lock(&crit_);
      return num_frames_received_ > kMinRequiredHistogramSamples;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      // NACK
      send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
      (*receive_configs)[0].renderer = this;
      // FEC
      if (use_red_) {
        send_config->rtp.ulpfec.ulpfec_payload_type = kUlpfecPayloadType;
        send_config->rtp.ulpfec.red_payload_type = kRedPayloadType;
        send_config->encoder_settings.encoder = vp8_encoder_.get();
        send_config->encoder_settings.payload_name = "VP8";
        (*receive_configs)[0].decoders[0].payload_name = "VP8";
        (*receive_configs)[0].rtp.ulpfec.red_payload_type = kRedPayloadType;
        (*receive_configs)[0].rtp.ulpfec.ulpfec_payload_type =
            kUlpfecPayloadType;
      }
      // RTX
      if (use_rtx_) {
        send_config->rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
        send_config->rtp.rtx.payload_type = kSendRtxPayloadType;
        (*receive_configs)[0].rtp.rtx_ssrc = kSendRtxSsrcs[0];
        (*receive_configs)[0]
            .rtp.rtx_associated_payload_types[kSendRtxPayloadType] =
            kFakeVideoSendPayloadType;
      }
      // RTT needed for RemoteNtpTimeEstimator for the receive stream.
      (*receive_configs)[0].rtp.rtcp_xr.receiver_reference_time_report = true;
      encoder_config->content_type =
          screenshare_ ? VideoEncoderConfig::ContentType::kScreen
                       : VideoEncoderConfig::ContentType::kRealtimeVideo;
    }

    void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
      sender_call_ = sender_call;
      receiver_call_ = receiver_call;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out waiting for packet to be NACKed.";
    }

    rtc::CriticalSection crit_;
    const bool use_rtx_;
    const bool use_red_;
    const bool screenshare_;
    const std::unique_ptr<VideoEncoder> vp8_encoder_;
    Call* sender_call_;
    Call* receiver_call_;
    int64_t start_runtime_ms_;
    int num_frames_received_ GUARDED_BY(&crit_);
  } test(use_rtx, use_red, screenshare);
| |
  // Clear any histogram samples from earlier tests, then run the call.
  metrics::Reset();
  RunBaseTest(&test);

  // Screenshare variants report under a separate histogram prefix.
  std::string video_prefix =
      screenshare ? "WebRTC.Video.Screenshare." : "WebRTC.Video.";

  // Verify that stats have been updated once. Call-lifetime is recorded by
  // both the sender and the receiver call, hence two samples.
  EXPECT_EQ(2, metrics::NumSamples("WebRTC.Call.LifetimeInSeconds"));
  EXPECT_EQ(1, metrics::NumSamples(
                   "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.VideoBitrateReceivedInKbps"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.RtcpBitrateReceivedInBps"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.BitrateReceivedInKbps"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.EstimatedSendBitrateInKbps"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Call.PacerBitrateInKbps"));

  // Per-stream lifetime stats.
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.SendStreamLifetimeInSeconds"));
  EXPECT_EQ(1,
            metrics::NumSamples("WebRTC.Video.ReceiveStreamLifetimeInSeconds"));

  // RTCP feedback counters: sent-side histograms are unprefixed, received-side
  // histograms use the (possibly screenshare) prefix.
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.NackPacketsSentPerMinute"));
  EXPECT_EQ(1,
            metrics::NumSamples(video_prefix + "NackPacketsReceivedPerMinute"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.FirPacketsSentPerMinute"));
  EXPECT_EQ(1,
            metrics::NumSamples(video_prefix + "FirPacketsReceivedPerMinute"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.PliPacketsSentPerMinute"));
  EXPECT_EQ(1,
            metrics::NumSamples(video_prefix + "PliPacketsReceivedPerMinute"));

  // Key-frame ratio, send and receive side.
  EXPECT_EQ(1, metrics::NumSamples(video_prefix + "KeyFramesSentInPermille"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.KeyFramesReceivedInPermille"));

  // Packet-loss percentages.
  EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentPacketsLostInPercent"));
  EXPECT_EQ(1,
            metrics::NumSamples("WebRTC.Video.ReceivedPacketsLostInPercent"));

  // Resolution stats for input, sent, and received frames.
  EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputWidthInPixels"));
  EXPECT_EQ(1, metrics::NumSamples(video_prefix + "InputHeightInPixels"));
  EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentWidthInPixels"));
  EXPECT_EQ(1, metrics::NumSamples(video_prefix + "SentHeightInPixels"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.ReceivedWidthInPixels"));
  EXPECT_EQ(1, metrics::NumSamples("WebRTC.Video.ReceivedHeightInPixels"));
| |
| EXPECT_EQ(1, metrics::NumEvents(video_prefix + "InputWidthInPixels", |
| kDefaultWidth)); |
| EXPECT_EQ(1, metrics::NumEvents(video_prefix + "InputHeightInPixels", |
|