// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "remoting/host/client_session.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "base/bind.h"
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "remoting/base/capabilities.h"
#include "remoting/base/constants.h"
#include "remoting/base/logging.h"
#include "remoting/base/session_options.h"
#include "remoting/host/action_executor.h"
#include "remoting/host/action_message_handler.h"
#include "remoting/host/audio_capturer.h"
#include "remoting/host/base/screen_controls.h"
#include "remoting/host/base/screen_resolution.h"
#include "remoting/host/desktop_display_info_monitor.h"
#include "remoting/host/desktop_environment.h"
#include "remoting/host/file_transfer/file_transfer_message_handler.h"
#include "remoting/host/file_transfer/rtc_log_file_operations.h"
#include "remoting/host/host_extension_session.h"
#include "remoting/host/input_injector.h"
#include "remoting/host/keyboard_layout_monitor.h"
#include "remoting/host/mouse_shape_pump.h"
#include "remoting/host/remote_open_url/remote_open_url_constants.h"
#include "remoting/host/remote_open_url/remote_open_url_message_handler.h"
#include "remoting/host/remote_open_url/url_forwarder_configurator.h"
#include "remoting/host/remote_open_url/url_forwarder_control_message_handler.h"
#include "remoting/host/webauthn/remote_webauthn_constants.h"
#include "remoting/host/webauthn/remote_webauthn_message_handler.h"
#include "remoting/proto/control.pb.h"
#include "remoting/proto/event.pb.h"
#include "remoting/protocol/audio_stream.h"
#include "remoting/protocol/capability_names.h"
#include "remoting/protocol/client_stub.h"
#include "remoting/protocol/clipboard_thread_proxy.h"
#include "remoting/protocol/pairing_registry.h"
#include "remoting/protocol/peer_connection_controls.h"
#include "remoting/protocol/session.h"
#include "remoting/protocol/session_config.h"
#include "remoting/protocol/video_frame_pump.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_capturer.h"
namespace {
constexpr char kRtcLogTransferDataChannelPrefix[] = "rtc-log-transfer-";
constexpr char kStreamName[] = "screen_stream";
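
// Returns the per-monitor stream name used when multi-stream is enabled,
// e.g. screen ID 2 maps to "screen_stream_2"; the bare kStreamName is used
// for the legacy single-stream case.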
std::string StreamNameForId(webrtc::ScreenId id) {
return std::string(kStreamName) + "_" + base::NumberToString(id);
}
} // namespace

namespace remoting {
using protocol::ActionRequest;
ClientSession::VideoStreamWithComposer::VideoStreamWithComposer() = default;
ClientSession::VideoStreamWithComposer::~VideoStreamWithComposer() = default;
ClientSession::VideoStreamWithComposer::VideoStreamWithComposer(
VideoStreamWithComposer&&) = default;
ClientSession::VideoStreamWithComposer&
ClientSession::VideoStreamWithComposer::operator=(VideoStreamWithComposer&&) =
default;
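
// The constructor wires up the input pipeline. Events received from the
// client flow through the filters in the order implied by the initializer
// list below and by set_input_stub() in OnConnectionAuthenticated():
//   disable_input_filter_ -> desktop_and_cursor_composer_notifier_ ->
//   mouse_clamping_filter_ -> remote_input_filter_ -> input_tracker_ ->
//   host_input_filter_ -> InputInjector.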
ClientSession::ClientSession(
EventHandler* event_handler,
std::unique_ptr<protocol::ConnectionToClient> connection,
DesktopEnvironmentFactory* desktop_environment_factory,
const DesktopEnvironmentOptions& desktop_environment_options,
const base::TimeDelta& max_duration,
scoped_refptr<protocol::PairingRegistry> pairing_registry,
const std::vector<HostExtension*>& extensions)
: event_handler_(event_handler),
desktop_environment_factory_(desktop_environment_factory),
desktop_environment_options_(desktop_environment_options),
input_tracker_(&host_input_filter_),
remote_input_filter_(&input_tracker_),
mouse_clamping_filter_(&remote_input_filter_),
desktop_and_cursor_composer_notifier_(&mouse_clamping_filter_, this),
disable_input_filter_(&desktop_and_cursor_composer_notifier_),
host_clipboard_filter_(clipboard_echo_filter_.host_filter()),
client_clipboard_filter_(clipboard_echo_filter_.client_filter()),
client_clipboard_factory_(&client_clipboard_filter_),
max_duration_(max_duration),
pairing_registry_(pairing_registry),
connection_(std::move(connection)),
client_jid_(connection_->session()->jid()) {
connection_->session()->AddPlugin(&host_experiment_session_plugin_);
connection_->SetEventHandler(this);
// Create a manager for the configured extensions, if any.
extension_manager_ =
std::make_unique<HostExtensionSessionManager>(extensions, this);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS)
// LocalMouseInputMonitorWin and LocalPointerInputMonitorChromeos filter out
// an echo of the injected input before it reaches |remote_input_filter_|.
remote_input_filter_.SetExpectLocalEcho(false);
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS)
}
ClientSession::~ClientSession() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!audio_stream_);
DCHECK(!desktop_environment_);
DCHECK(!input_injector_);
DCHECK(!screen_controls_);
DCHECK(video_streams_.empty());
}
void ClientSession::NotifyClientResolution(
const protocol::ClientResolution& resolution) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(resolution.dips_width() >= 0 && resolution.dips_height() >= 0);
VLOG(1) << "Received ClientResolution (dips_width=" << resolution.dips_width()
<< ", dips_height=" << resolution.dips_height() << ")";
if (!screen_controls_)
return;
webrtc::DesktopSize client_size(resolution.dips_width(),
resolution.dips_height());
if (connection_->session()->config().protocol() ==
protocol::SessionConfig::Protocol::WEBRTC) {
// When using WebRTC, round down the dimensions to a multiple of 2. Otherwise
// the dimensions will be rounded on the receiver, which will cause blurring
// due to scaling. The resulting size is still close to the client size and
// will fit on the client's screen without scaling.
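// For example, a requested client size of 1367x769 becomes 1366x768.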
// TODO(sergeyu): Make WebRTC handle odd dimensions properly.
// crbug.com/636071
client_size.set(client_size.width() & (~1), client_size.height() & (~1));
}
// Try to match the client's resolution.
// TODO(sergeyu): Pass the client's DPI to the resizer.
screen_controls_->SetScreenResolution(ScreenResolution(
client_size, webrtc::DesktopVector(kDefaultDpi, kDefaultDpi)));
}
void ClientSession::ControlVideo(const protocol::VideoControl& video_control) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// Note that |video_streams_| may be empty, depending upon whether
// extensions choose to wrap or "steal" the video capturer or encoder.
if (video_control.has_enable()) {
VLOG(1) << "Received VideoControl (enable=" << video_control.enable()
<< ")";
pause_video_ = !video_control.enable();
for (auto& video_stream : video_streams_) {
video_stream.stream->Pause(pause_video_);
}
}
if (video_control.has_lossless_encode()) {
VLOG(1) << "Received VideoControl (lossless_encode="
<< video_control.lossless_encode() << ")";
lossless_video_encode_ = video_control.lossless_encode();
for (auto& video_stream : video_streams_) {
video_stream.stream->SetLosslessEncode(lossless_video_encode_);
}
}
if (video_control.has_lossless_color()) {
VLOG(1) << "Received VideoControl (lossless_color="
<< video_control.lossless_color() << ")";
lossless_video_color_ = video_control.lossless_color();
for (auto& video_stream : video_streams_) {
video_stream.stream->SetLosslessColor(lossless_video_color_);
}
}
}
void ClientSession::ControlAudio(const protocol::AudioControl& audio_control) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (audio_control.has_enable()) {
VLOG(1) << "Received AudioControl (enable=" << audio_control.enable()
<< ")";
if (audio_stream_)
audio_stream_->Pause(!audio_control.enable());
}
}
void ClientSession::SetCapabilities(
const protocol::Capabilities& capabilities) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// Ignore all but the first Capabilities message.
if (client_capabilities_) {
LOG(WARNING) << "protocol::Capabilities has been received already.";
return;
}
// Compute the set of capabilities supported by both client and host.
client_capabilities_ = std::make_unique<std::string>();
if (capabilities.has_capabilities())
*client_capabilities_ = capabilities.capabilities();
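// Capability strings are space-separated lists of tokens; the negotiated
// set consists of the tokens advertised by both the client and the host.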
capabilities_ =
IntersectCapabilities(*client_capabilities_, host_capabilities_);
extension_manager_->OnNegotiatedCapabilities(connection_->client_stub(),
capabilities_);
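// Register data-channel handlers only for the features the client
// advertised. Each handler is created lazily when the client opens a data
// channel whose name matches the registered name or prefix (see
// OnIncomingDataChannel()).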
if (HasCapability(capabilities_, protocol::kFileTransferCapability)) {
data_channel_manager_.RegisterCreateHandlerCallback(
kFileTransferDataChannelPrefix,
base::BindRepeating(&ClientSession::CreateFileTransferMessageHandler,
base::Unretained(this)));
}
if (HasCapability(capabilities_, protocol::kRtcLogTransferCapability)) {
data_channel_manager_.RegisterCreateHandlerCallback(
kRtcLogTransferDataChannelPrefix,
base::BindRepeating(&ClientSession::CreateRtcLogTransferMessageHandler,
base::Unretained(this)));
}
if (HasCapability(capabilities_, protocol::kRemoteOpenUrlCapability)) {
data_channel_manager_.RegisterCreateHandlerCallback(
kRemoteOpenUrlDataChannelName,
base::BindRepeating(&ClientSession::CreateRemoteOpenUrlMessageHandler,
base::Unretained(this)));
data_channel_manager_.RegisterCreateHandlerCallback(
UrlForwarderControlMessageHandler::kDataChannelName,
base::BindRepeating(
&ClientSession::CreateUrlForwarderControlMessageHandler,
base::Unretained(this)));
}
if (HasCapability(capabilities_, protocol::kRemoteWebAuthnCapability)) {
data_channel_manager_.RegisterCreateHandlerCallback(
kRemoteWebAuthnDataChannelName,
base::BindRepeating(&ClientSession::CreateRemoteWebAuthnMessageHandler,
base::Unretained(this)));
}
std::vector<ActionRequest::Action> supported_actions;
if (HasCapability(capabilities_, protocol::kSendAttentionSequenceAction))
supported_actions.push_back(ActionRequest::SEND_ATTENTION_SEQUENCE);
if (HasCapability(capabilities_, protocol::kLockWorkstationAction))
supported_actions.push_back(ActionRequest::LOCK_WORKSTATION);
if (supported_actions.size() > 0) {
// Register the action message handler.
data_channel_manager_.RegisterCreateHandlerCallback(
kActionDataChannelPrefix,
base::BindRepeating(&ClientSession::CreateActionMessageHandler,
base::Unretained(this),
std::move(supported_actions)));
}
if (HasCapability(capabilities_, protocol::kMultiStreamCapability)) {
if (desktop_display_info_.NumDisplays() != 0) {
// If display info is already known, create the initial video streams.
// Otherwise they will be created on the next displays-changed message.
CreatePerMonitorVideoStreams();
}
// Create a new DisplayInfoMonitor that is owned by this class (instead of
// being owned by the DesktopCapturerProxy for the single-stream case).
display_info_monitor_ = desktop_environment_->CreateDisplayInfoMonitor();
display_info_monitor_->Start();
}
VLOG(1) << "Client capabilities: " << *client_capabilities_;
desktop_environment_->SetCapabilities(capabilities_);
}
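
// Creates a pairing entry for the requesting client (if a pairing registry
// is configured) and returns the generated client ID and shared secret so
// the client can be recognized on subsequent connections.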
void ClientSession::RequestPairing(
const protocol::PairingRequest& pairing_request) {
if (pairing_registry_.get() && pairing_request.has_client_name()) {
protocol::PairingRegistry::Pairing pairing =
pairing_registry_->CreatePairing(pairing_request.client_name());
protocol::PairingResponse pairing_response;
pairing_response.set_client_id(pairing.client_id());
pairing_response.set_shared_secret(pairing.shared_secret());
connection_->client_stub()->SetPairingResponse(pairing_response);
}
}
void ClientSession::DeliverClientMessage(
const protocol::ExtensionMessage& message) {
if (message.has_type()) {
if (extension_manager_->OnExtensionMessage(message))
return;
DLOG(INFO) << "Unexpected message received: " << message.type() << ": "
<< message.data();
}
}
void ClientSession::SelectDesktopDisplay(
const protocol::SelectDesktopDisplayRequest& select_display) {
LOG(INFO) << "SelectDesktopDisplay "
<< "'" << select_display.id() << "'";
if (HasCapability(capabilities_, protocol::kMultiStreamCapability)) {
// TODO(lambroslambrou): Close the connection with a protocol error,
// once we are sure the client will not send this request after
// multi-stream has been negotiated.
LOG(ERROR) << "SelectDesktopDisplayRequest received after multi-stream is "
"enabled.";
return;
}
// Parse the string with the selected display. Note that this request's |id|
// field is not a monitor ID, but an index into the list of displays (or the
// special string "all"),
int new_index = webrtc::kInvalidScreenId;
if (select_display.id() == "all") {
new_index = webrtc::kFullDesktopScreenId;
} else {
if (!base::StringToInt(select_display.id().c_str(), &new_index)) {
LOG(ERROR) << " Unable to parse display index "
<< "'" << select_display.id() << "'";
new_index = webrtc::kInvalidScreenId;
}
if (!desktop_display_info_.GetDisplayInfo(new_index)) {
LOG(ERROR) << " Invalid display index "
<< "'" << select_display.id() << "'";
new_index = webrtc::kInvalidScreenId;
}
}
// Don't allow requests for fullscreen if not supported by the current
// display configuration.
if (!can_capture_full_desktop_ && new_index == webrtc::kFullDesktopScreenId) {
LOG(ERROR) << " Full desktop not supported";
new_index = webrtc::kInvalidScreenId;
}
// Fall back to default capture config if invalid request.
if (new_index == webrtc::kInvalidScreenId) {
LOG(ERROR) << " Invalid display specification, falling back to default";
new_index = can_capture_full_desktop_ ? webrtc::kFullDesktopScreenId : 0;
}
if (selected_display_index_ == new_index) {
LOG(INFO) << " Display " << new_index << " is already selected. Ignoring";
return;
}
const DisplayGeometry* oldGeo =
desktop_display_info_.GetDisplayInfo(selected_display_index_);
const DisplayGeometry* newGeo =
desktop_display_info_.GetDisplayInfo(new_index);
if (newGeo) {
video_streams_[0].stream->SelectSource(newGeo->id);
} else if (new_index == webrtc::kFullDesktopScreenId) {
video_streams_[0].stream->SelectSource(webrtc::kFullDesktopScreenId);
} else {
// This corner-case might occur if fullscreen capture is not supported, and
// the fallback default of 0 is not a valid index (the list is empty).
LOG(ERROR) << " Display geometry not found for index " << new_index;
return;
}
selected_display_index_ = new_index;
// If the old and new displays are different sizes, then SelectSource()
// will trigger an OnVideoSizeChanged() message which will update the mouse
// filters.
// However, if the old and new displays are the exact same size, then the
// video size message will not be generated (because the size of the video
// has not changed). But we still need to update the mouse clamping filter
// with the new display origin, so we update that directly.
if (oldGeo != nullptr && newGeo != nullptr) {
if (oldGeo->width == newGeo->width && oldGeo->height == newGeo->height) {
UpdateMouseClampingFilterOffset();
}
}
}
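
// Applies client-requested transport parameters: preferred minimum/maximum
// bitrates and ICE or SDP restarts. Ignored if the connection does not
// expose peer-connection controls.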
void ClientSession::ControlPeerConnection(
const protocol::PeerConnectionParameters& parameters) {
if (!connection_->peer_connection_controls()) {
return;
}
absl::optional<int> min_bitrate_bps;
absl::optional<int> max_bitrate_bps;
bool set_preferred_bitrates = false;
if (parameters.has_preferred_min_bitrate_bps()) {
min_bitrate_bps = parameters.preferred_min_bitrate_bps();
set_preferred_bitrates = true;
}
if (parameters.has_preferred_max_bitrate_bps()) {
max_bitrate_bps = parameters.preferred_max_bitrate_bps();
set_preferred_bitrates = true;
}
if (set_preferred_bitrates) {
connection_->peer_connection_controls()->SetPreferredBitrates(
min_bitrate_bps, max_bitrate_bps);
}
if (parameters.request_ice_restart()) {
connection_->peer_connection_controls()->RequestIceRestart();
}
if (parameters.request_sdp_restart()) {
connection_->peer_connection_controls()->RequestSdpRestart();
}
}
void ClientSession::OnConnectionAuthenticating() {
event_handler_->OnSessionAuthenticating(this);
}
void ClientSession::OnConnectionAuthenticated() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!audio_stream_);
DCHECK(!desktop_environment_);
DCHECK(!input_injector_);
DCHECK(!screen_controls_);
DCHECK(video_streams_.empty());
is_authenticated_ = true;
desktop_display_info_.Reset();
if (max_duration_.is_positive()) {
max_duration_timer_.Start(
FROM_HERE, max_duration_,
base::BindOnce(&ClientSession::DisconnectSession,
base::Unretained(this), protocol::MAX_SESSION_LENGTH));
}
// Notify EventHandler.
event_handler_->OnSessionAuthenticated(this);
const SessionOptions session_options(
host_experiment_session_plugin_.configuration());
connection_->ApplySessionOptions(session_options);
DesktopEnvironmentOptions options = desktop_environment_options_;
options.ApplySessionOptions(session_options);
// Create the desktop environment. Drop the connection if it could not be
// created for any reason (for instance the curtain could not initialize).
desktop_environment_ = desktop_environment_factory_->Create(
client_session_control_weak_factory_.GetWeakPtr(),
client_session_events_weak_factory_.GetWeakPtr(), options);
if (!desktop_environment_) {
DisconnectSession(protocol::HOST_CONFIGURATION_ERROR);
return;
}
// Connect host stub.
connection_->set_host_stub(this);
// Collate the set of capabilities to offer the client, if it supports them.
host_capabilities_ = desktop_environment_->GetCapabilities();
if (!host_capabilities_.empty())
host_capabilities_.append(" ");
host_capabilities_.append(extension_manager_->GetCapabilities());
if (!host_capabilities_.empty())
host_capabilities_.append(" ");
host_capabilities_.append(protocol::kRtcLogTransferCapability);
host_capabilities_.append(" ");
host_capabilities_.append(protocol::kWebrtcIceSdpRestartAction);
// Create the object that controls the screen resolution.
screen_controls_ = desktop_environment_->CreateScreenControls();
// Create the event executor.
input_injector_ = desktop_environment_->CreateInputInjector();
// Connect the host input stubs.
connection_->set_input_stub(&disable_input_filter_);
host_input_filter_.set_input_stub(input_injector_.get());
if (desktop_environment_options_.clipboard_size().has_value()) {
int max_size = desktop_environment_options_.clipboard_size().value();
client_clipboard_filter_.set_max_size(max_size);
host_clipboard_filter_.set_max_size(max_size);
}
// Connect the clipboard stubs.
connection_->set_clipboard_stub(&host_clipboard_filter_);
clipboard_echo_filter_.set_host_stub(input_injector_.get());
clipboard_echo_filter_.set_client_stub(connection_->client_stub());
}
void ClientSession::CreateMediaStreams() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// Create a monitor to be used by the capturer. This is used only by
// DesktopCapturerProxy. With multi-process, the IpcDesktopEnvironment
// version of this method returns nullptr, and the monitor is created
// in the Desktop process instead (where DesktopCapturerProxy is used).
auto monitor = desktop_environment_->CreateDisplayInfoMonitor();
// Create a VideoStream to pump frames from the capturer to the client.
DCHECK(video_streams_.empty());
auto composer =
desktop_environment_->CreateComposingVideoCapturer(std::move(monitor));
VideoStreamWithComposer video_stream;
if (composer) {
video_stream.composer = composer->GetWeakPtr();
video_stream.stream =
connection_->StartVideoStream(kStreamName, std::move(composer));
} else {
video_stream.stream = connection_->StartVideoStream(
kStreamName,
desktop_environment_->CreateVideoCapturer(std::move(monitor)));
}
video_streams_.push_back(std::move(video_stream));
// Create an AudioStream to pump audio from the capturer to the client.
std::unique_ptr<protocol::AudioSource> audio_capturer =
desktop_environment_->CreateAudioCapturer();
if (audio_capturer) {
audio_stream_ = connection_->StartAudioStream(std::move(audio_capturer));
}
video_streams_[0].stream->SetObserver(this);
// Apply video-control parameters to the new stream.
video_streams_[0].stream->SetLosslessEncode(lossless_video_encode_);
video_streams_[0].stream->SetLosslessColor(lossless_video_color_);
// Pause capturing if necessary.
video_streams_[0].stream->Pause(pause_video_);
if (event_timestamp_source_for_tests_)
video_streams_[0].stream->SetEventTimestampsSource(
event_timestamp_source_for_tests_);
}
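
// Creates one video stream per monitor; used once the client has negotiated
// the multi-stream capability. Each stream captures a single display and is
// named via StreamNameForId() so the client can associate it with an entry
// in the VideoLayout message.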
void ClientSession::CreatePerMonitorVideoStreams() {
DCHECK(desktop_display_info_.NumDisplays() > 0);
// Destroy any video-stream that was created for the single-stream case.
video_streams_.clear();
for (int i = 0; i < desktop_display_info_.NumDisplays(); i++) {
auto id = desktop_display_info_.GetDisplayInfo(i)->id;
auto stream_name = StreamNameForId(id);
LOG(INFO) << "Creating video stream: " << stream_name;
auto composer = desktop_environment_->CreateComposingVideoCapturer(
/* DesktopDisplayInfoMonitor */ nullptr);
VideoStreamWithComposer video_stream;
if (composer) {
video_stream.composer = composer->GetWeakPtr();
video_stream.stream =
connection_->StartVideoStream(stream_name, std::move(composer));
} else {
video_stream.stream = connection_->StartVideoStream(
stream_name, desktop_environment_->CreateVideoCapturer(
/* DesktopDisplayInfoMonitor */ nullptr));
}
video_stream.stream->SelectSource(id);
// SetObserver(this) is not called on the new video-stream, because
// per-monitor resizing should be handled by OnDesktopDisplayChanged()
// rather than OnVideoSizeChanged(). The latter would send out a legacy
// (non-extended) video-layout message, which may confuse the client when
// multi-stream is being used.
// Pause capturing if necessary.
video_stream.stream->Pause(pause_video_);
if (event_timestamp_source_for_tests_)
video_stream.stream->SetEventTimestampsSource(
event_timestamp_source_for_tests_);
video_streams_.push_back(std::move(video_stream));
}
}
void ClientSession::OnConnectionChannelsConnected() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!channels_connected_);
channels_connected_ = true;
// Negotiate capabilities with the client.
VLOG(1) << "Host capabilities: " << host_capabilities_;
protocol::Capabilities capabilities;
capabilities.set_capabilities(host_capabilities_);
connection_->client_stub()->SetCapabilities(capabilities);
// Start the event executor.
input_injector_->Start(CreateClipboardProxy());
SetDisableInputs(false);
// Create MouseShapePump to send mouse cursor shape.
mouse_shape_pump_ = std::make_unique<MouseShapePump>(
desktop_environment_->CreateMouseCursorMonitor(),
connection_->client_stub());
mouse_shape_pump_->SetMouseCursorMonitorCallback(this);
// Create KeyboardLayoutMonitor to send keyboard layout.
// Unretained is sound because the callback will never be called after
// |keyboard_layout_monitor_| has been destroyed, and |connection_| (which
// owns the client stub) is guaranteed to outlive |keyboard_layout_monitor_|.
keyboard_layout_monitor_ = desktop_environment_->CreateKeyboardLayoutMonitor(
base::BindRepeating(&protocol::KeyboardLayoutStub::SetKeyboardLayout,
base::Unretained(connection_->client_stub())));
keyboard_layout_monitor_->Start();
if (pending_video_layout_message_) {
connection_->client_stub()->SetVideoLayout(*pending_video_layout_message_);
pending_video_layout_message_.reset();
}
// Notify the event handler that all our channels are now connected.
event_handler_->OnSessionChannelsConnected(this);
}
void ClientSession::OnConnectionClosed(protocol::ErrorCode error) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
HOST_LOG << "Client disconnected: " << client_jid_ << "; error = " << error;
// Ignore any further callbacks.
client_session_control_weak_factory_.InvalidateWeakPtrs();
client_session_events_weak_factory_.InvalidateWeakPtrs();
// If the client never authenticated then the session failed.
if (!is_authenticated_)
event_handler_->OnSessionAuthenticationFailed(this);
// Ensure that any pressed keys or buttons are released.
input_tracker_.ReleaseAll();
// Stop components from accessing the client, audio, or video stubs, which
// are no longer valid once ConnectionToClient calls OnConnectionClosed().
audio_stream_.reset();
mouse_shape_pump_.reset();
video_streams_.clear();
keyboard_layout_monitor_.reset();
client_clipboard_factory_.InvalidateWeakPtrs();
input_injector_.reset();
screen_controls_.reset();
desktop_environment_.reset();
// Notify the ChromotingHost that this client is disconnected.
event_handler_->OnSessionClosed(this);
}
void ClientSession::OnTransportProtocolChange(const std::string& protocol) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
HOST_LOG << "Transport protocol: " << protocol;
protocol::TransportInfo transport_info;
transport_info.set_protocol(protocol);
connection_->client_stub()->SetTransportInfo(transport_info);
}
void ClientSession::OnRouteChange(const std::string& channel_name,
const protocol::TransportRoute& route) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
event_handler_->OnSessionRouteChange(this, channel_name, route);
}
void ClientSession::OnIncomingDataChannel(
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
data_channel_manager_.OnIncomingDataChannel(channel_name, std::move(pipe));
}
const std::string& ClientSession::client_jid() const {
return client_jid_;
}
void ClientSession::DisconnectSession(protocol::ErrorCode error) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(connection_.get());
max_duration_timer_.Stop();
// This triggers OnConnectionClosed(), and the session may be destroyed
// as the result, so this call must be the last in this method.
connection_->Disconnect(error);
}
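
// Local input notifications. If the host is configured to terminate upon
// local input, the session is disconnected; otherwise local pointer
// activity is reported to the desktop-and-cursor composer notifier.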
void ClientSession::OnLocalKeyPressed(uint32_t usb_keycode) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
bool is_local = remote_input_filter_.LocalKeyPressed(usb_keycode);
if (is_local && desktop_environment_options_.terminate_upon_input())
DisconnectSession(protocol::OK);
}
void ClientSession::OnLocalPointerMoved(const webrtc::DesktopVector& position,
ui::EventType type) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
bool is_local = remote_input_filter_.LocalPointerMoved(position, type);
if (is_local) {
if (desktop_environment_options_.terminate_upon_input())
DisconnectSession(protocol::OK);
else
desktop_and_cursor_composer_notifier_.OnLocalInput();
}
}
void ClientSession::SetDisableInputs(bool disable_inputs) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (disable_inputs)
input_tracker_.ReleaseAll();
disable_input_filter_.set_enabled(!disable_inputs);
host_clipboard_filter_.set_enabled(!disable_inputs);
}
uint32_t ClientSession::desktop_session_id() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(desktop_environment_);
return desktop_environment_->GetDesktopSessionId();
}
ClientSessionControl* ClientSession::session_control() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return this;
}
void ClientSession::SetComposeEnabled(bool enabled) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
for (auto& video_stream : video_streams_) {
if (video_stream.composer)
video_stream.composer->SetComposeEnabled(enabled);
}
}
void ClientSession::OnMouseCursor(webrtc::MouseCursor* mouse_cursor) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// This method takes ownership of |mouse_cursor|.
std::unique_ptr<webrtc::MouseCursor> owned_cursor(mouse_cursor);
for (auto& video_stream : video_streams_) {
if (video_stream.composer) {
video_stream.composer->SetMouseCursor(
base::WrapUnique(webrtc::MouseCursor::CopyOf(*owned_cursor)));
}
}
}
void ClientSession::OnMouseCursorPosition(
const webrtc::DesktopVector& position) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
for (auto& video_stream : video_streams_) {
if (video_stream.composer)
video_stream.composer->SetMouseCursorPosition(position);
}
}
void ClientSession::BindReceiver(
mojo::PendingReceiver<mojom::ChromotingSessionServices> receiver) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
session_services_receivers_.Add(this, std::move(receiver));
}
void ClientSession::BindWebAuthnProxy(
mojo::PendingReceiver<mojom::WebAuthnProxy> receiver) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!remote_webauthn_message_handler_) {
LOG(WARNING)
<< "No WebAuthn message handler is found. Binding request rejected.";
return;
}
remote_webauthn_message_handler_->AddReceiver(std::move(receiver));
}
void ClientSession::BindRemoteUrlOpener(
mojo::PendingReceiver<mojom::RemoteUrlOpener> receiver) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!remote_open_url_message_handler_) {
LOG(WARNING) << "No RemoteOpenUrl message handler is found. Binding "
<< "request rejected.";
return;
}
remote_open_url_message_handler_->AddReceiver(std::move(receiver));
}
void ClientSession::RegisterCreateHandlerCallbackForTesting(
const std::string& prefix,
protocol::DataChannelManager::CreateHandlerCallback constructor) {
data_channel_manager_.RegisterCreateHandlerCallback(prefix,
std::move(constructor));
}
void ClientSession::SetEventTimestampsSourceForTests(
scoped_refptr<protocol::InputEventTimestampsSource>
event_timestamp_source) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
event_timestamp_source_for_tests_ = event_timestamp_source;
for (auto& video_stream : video_streams_)
video_stream.stream->SetEventTimestampsSource(
event_timestamp_source_for_tests_);
}
std::unique_ptr<protocol::ClipboardStub> ClientSession::CreateClipboardProxy() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return std::make_unique<protocol::ClipboardThreadProxy>(
client_clipboard_factory_.GetWeakPtr(),
base::ThreadTaskRunnerHandle::Get());
}
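
// Configures mouse clamping/scaling for the given display size. ICE clients
// send coordinates in physical pixels, while WebRTC clients send DIPs
// (except on Apple platforms, where capture coordinates are in physical
// pixels).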
void ClientSession::SetMouseClampingFilter(const DisplaySize& size) {
UpdateMouseClampingFilterOffset();
mouse_clamping_filter_.set_output_size(size.WidthAsPixels(),
size.HeightAsPixels());
switch (connection_->session()->config().protocol()) {
case protocol::SessionConfig::Protocol::ICE:
mouse_clamping_filter_.set_input_size(size.WidthAsPixels(),
size.HeightAsPixels());
break;
case protocol::SessionConfig::Protocol::WEBRTC: {
#if BUILDFLAG(IS_APPLE)
mouse_clamping_filter_.set_input_size(size.WidthAsPixels(),
size.HeightAsPixels());
#else
// When using the WebRTC protocol the client sends mouse coordinates in
// DIPs, while InputInjector expects them in physical pixels.
// TODO(sergeyu): Fix InputInjector implementations to use DIPs as well.
mouse_clamping_filter_.set_input_size(size.WidthAsDips(),
size.HeightAsDips());
#endif // BUILDFLAG(IS_APPLE)
}
}
}
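
// Applies the origin of the currently selected display so that clamped
// coordinates are translated into the full desktop's coordinate space.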
void ClientSession::UpdateMouseClampingFilterOffset() {
if (selected_display_index_ == webrtc::kInvalidScreenId)
return;
webrtc::DesktopVector origin;
origin = desktop_display_info_.CalcDisplayOffset(selected_display_index_);
mouse_clamping_filter_.set_output_offset(origin);
}
void ClientSession::OnVideoSizeChanged(protocol::VideoStream* video_stream,
const webrtc::DesktopSize& size_px,
const webrtc::DesktopVector& dpi) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
LOG(INFO) << "ClientSession::OnVideoSizeChanged";
DisplaySize size =
DisplaySize::FromPixels(size_px.width(), size_px.height(), dpi.x());
LOG(INFO) << " DisplaySize: " << size
<< " (size in pixels: " << size_px.width() << "x"
<< size_px.height() << ")";
// The first video size message that we receive from WebRTC is the full
// desktop size (if supported). If full desktop capture is not supported,
// then this will be the size of the default display.
if (default_webrtc_desktop_size_.IsEmpty()) {
default_webrtc_desktop_size_ = size;
LOG(INFO) << " display index " << selected_display_index_;
LOG(INFO) << " Recording default webrtc capture size "
<< default_webrtc_desktop_size_;
}
webrtc_capture_size_ = size;
SetMouseClampingFilter(size);
// Record default DPI in case a display reports 0 for DPI.
default_x_dpi_ = dpi.x();
default_y_dpi_ = dpi.y();
if (dpi.x() != dpi.y()) {
LOG(WARNING) << "Mismatch x,y dpi. x=" << dpi.x() << " y=" << dpi.y();
}
if (connection_->session()->config().protocol() !=
protocol::SessionConfig::Protocol::WEBRTC) {
return;
}
// Generate and send VideoLayout message.
protocol::VideoLayout layout;
protocol::VideoTrackLayout* video_track = layout.add_video_track();
video_track->set_position_x(0);
video_track->set_position_y(0);
video_track->set_width(size.WidthAsDips());
video_track->set_height(size.HeightAsDips());
video_track->set_x_dpi(dpi.x());
video_track->set_y_dpi(dpi.y());
// VideoLayout can be sent only after the control channel is connected.
// TODO(sergeyu): Change client_stub() implementation to allow queuing
// while connection is being established.
if (channels_connected_) {
connection_->client_stub()->SetVideoLayout(layout);
} else {
pending_video_layout_message_ =
std::make_unique<protocol::VideoLayout>(layout);
}
}
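
// Handles a displays-changed notification: recomputes the bounding box of
// all displays, rebuilds |desktop_display_info_|, updates the mouse
// clamping filter, and sends an updated VideoLayout to the client.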
void ClientSession::OnDesktopDisplayChanged(
std::unique_ptr<protocol::VideoLayout> displays) {
LOG(INFO) << "ClientSession::OnDesktopDisplayChanged";
bool firstDisplayChangedMessage = (desktop_display_info_.NumDisplays() == 0);
bool multiStreamEnabled =
HasCapability(capabilities_, protocol::kMultiStreamCapability);
// Scan display list to calculate the full desktop size.
int min_x = 0;
int max_x = 0;
int min_y = 0;
int max_y = 0;
int dpi_x = 0;
int dpi_y = 0;
LOG(INFO) << " Scanning display info... (dips)";
for (int display_id = 0; display_id < displays->video_track_size();
display_id++) {
protocol::VideoTrackLayout track = displays->video_track(display_id);
LOG(INFO) << " #" << display_id << " : " << track.position_x() << ","
<< track.position_y() << " " << track.width() << "x"
<< track.height() << " [" << track.x_dpi() << "," << track.y_dpi()
<< "], screen_id=" << track.screen_id();
if (dpi_x == 0)
dpi_x = track.x_dpi();
if (dpi_y == 0)
dpi_y = track.y_dpi();
int x = track.position_x();
int y = track.position_y();
min_x = std::min(x, min_x);
min_y = std::min(y, min_y);
max_x = std::max(x + track.width(), max_x);
max_y = std::max(y + track.height(), max_y);
}
// TODO(garykac): Investigate why these DPI values are 0 for some users.
if (dpi_x == 0)
dpi_x = default_x_dpi_;
if (dpi_y == 0)
dpi_y = default_y_dpi_;
// Calculate the scaled desktop geometry (in DIPs).
// See comment in OnVideoSizeChanged() for details.
const webrtc::DesktopSize size(max_x - min_x, max_y - min_y);
// If this is our first message, then we need to determine if the current
// display configuration supports capturing the entire desktop.
LOG(INFO) << " Webrtc desktop size " << default_webrtc_desktop_size_;
if (selected_display_index_ == webrtc::kInvalidScreenId) {
#if BUILDFLAG(IS_APPLE)
// On macOS, there are situations where WebRTC cannot capture the entire
// desktop (e.g., when there are displays with different DPIs). We detect
// this situation by comparing the full desktop size (calculated above
// from the displays) and the size of the initial webrtc capture (which
// defaults to the full desktop if supported).
if (size.width() == default_webrtc_desktop_size_.WidthAsDips() &&
size.height() == default_webrtc_desktop_size_.HeightAsDips()) {
LOG(INFO) << " Full desktop capture supported.";
can_capture_full_desktop_ = true;
} else {
LOG(INFO)
<< " This configuration does not support full desktop capture.";
can_capture_full_desktop_ = false;
}
#elif BUILDFLAG(IS_CHROMEOS)
can_capture_full_desktop_ = false;
#else
// Windows and Linux support full-desktop capture with multiple displays.
can_capture_full_desktop_ = true;
#endif // BUILDFLAG(IS_APPLE)
}
// Generate and send VideoLayout message.
protocol::VideoLayout layout;
layout.set_supports_full_desktop_capture(can_capture_full_desktop_);
protocol::VideoTrackLayout* video_track;
// For single-stream clients, the first layout must be the current webrtc
// capture size. This is required because we reuse the same message for both
// VideoSizeChanged (which is used to scale mouse coordinates) and
// DesktopDisplayChanged. Multi-stream clients will ignore the legacy layout
// message, except that the width/height must be non-zero (the first
// display-changed event may occur before |webrtc_capture_size_| becomes
// non-zero).
video_track = layout.add_video_track();
video_track->set_position_x(0);
video_track->set_position_y(0);
video_track->set_width(
multiStreamEnabled ? 1 : webrtc_capture_size_.WidthAsDips());
video_track->set_height(
multiStreamEnabled ? 1 : webrtc_capture_size_.HeightAsDips());
video_track->set_x_dpi(dpi_x);
video_track->set_y_dpi(dpi_y);
LOG(INFO) << " Webrtc capture size (DIPS) = 0,0 "
<< default_webrtc_desktop_size_;
// Add raw geometry for entire desktop (in DIPs).
video_track = layout.add_video_track();
video_track->set_position_x(0);
video_track->set_position_y(0);
video_track->set_width(size.width());
video_track->set_height(size.height());
video_track->set_x_dpi(dpi_x);
video_track->set_y_dpi(dpi_y);
LOG(INFO) << " Full Desktop (DIPS) = 0,0 " << size.width() << "x"
<< size.height() << " [" << dpi_x << "," << dpi_y << "]";
// Add a VideoTrackLayout entry for each separate display.
desktop_display_info_.Reset();
for (int display_id = 0; display_id < displays->video_track_size();
display_id++) {
protocol::VideoTrackLayout display = displays->video_track(display_id);
desktop_display_info_.AddDisplayFrom(display);
video_track = layout.add_video_track();
video_track->CopyFrom(display);
if (multiStreamEnabled) {
video_track->set_media_stream_id(StreamNameForId(display.screen_id()));
}
LOG(INFO) << " Display " << display_id << " = " << display.position_x()
<< "," << display.position_y() << " " << display.width() << "x"
<< display.height() << " [" << display.x_dpi() << ","
<< display.y_dpi() << "], screen_id=" << display.screen_id();
}
// Set the display index, if this is the first message being processed.
if (selected_display_index_ == webrtc::kInvalidScreenId) {
if (can_capture_full_desktop_) {
selected_display_index_ = webrtc::kFullDesktopScreenId;
} else {
// Select the default display.
protocol::SelectDesktopDisplayRequest req;
req.set_id("0");
SelectDesktopDisplay(req);
}
}
// We need to update the input filters whenever the displays change.
DisplaySize display_size =
DisplaySize::FromPixels(size.width(), size.height(), default_x_dpi_);
SetMouseClampingFilter(display_size);
connection_->client_stub()->SetVideoLayout(layout);
// If multi-stream is enabled, create the initial video-streams for the
// first display-changed message.
// TODO(lambroslambrou): Create/destroy streams on subsequent
// display-changed messages (based on added/removed monitors).
if (firstDisplayChangedMessage && multiStreamEnabled) {
CreatePerMonitorVideoStreams();
}
}
void ClientSession::OnDesktopAttached(uint32_t session_id) {
if (remote_webauthn_message_handler_) {
// On Windows, only processes running on an attached desktop session can
// bind ChromotingHostServices, so we notify the extension that it might be
// able to connect now.
remote_webauthn_message_handler_->NotifyWebAuthnStateChange();
}
}
void ClientSession::OnDesktopDetached() {
// Clear ChromotingSessionServices receivers and all other receivers brokered
// by ChromotingSessionServices, as they are scoped to the desktop session
// that is being detached.
// TODO(yuweih): If we decide to start the IPC server per remote session, then
// we may just stop the server here instead, which will automatically
// disconnect all ongoing IPCs.
session_services_receivers_.Clear();
if (remote_webauthn_message_handler_) {
remote_webauthn_message_handler_->ClearReceivers();
remote_webauthn_message_handler_->NotifyWebAuthnStateChange();
}
if (remote_open_url_message_handler_) {
remote_open_url_message_handler_->ClearReceivers();
}
}
void ClientSession::CreateFileTransferMessageHandler(
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
// FileTransferMessageHandler manages its own lifetime and is tied to the
// lifetime of |pipe|. Once |pipe| is closed, this instance will be cleaned
// up.
new FileTransferMessageHandler(channel_name, std::move(pipe),
desktop_environment_->CreateFileOperations());
}
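
// Like CreateFileTransferMessageHandler(), but the transferred data is the
// WebRTC event log provided by RtcLogFileOperations. The handler manages
// its own lifetime, tied to the lifetime of |pipe|.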
void ClientSession::CreateRtcLogTransferMessageHandler(
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
new FileTransferMessageHandler(
channel_name, std::move(pipe),
std::make_unique<RtcLogFileOperations>(connection_.get()));
}
void ClientSession::CreateActionMessageHandler(
std::vector<ActionRequest::Action> capabilities,
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
std::unique_ptr<ActionExecutor> action_executor =
desktop_environment_->CreateActionExecutor();
if (!action_executor)
return;
// ActionMessageHandler manages its own lifetime and is tied to the lifetime
// of |pipe|. Once |pipe| is closed, this instance will be cleaned up.
new ActionMessageHandler(channel_name, capabilities, std::move(pipe),
std::move(action_executor));
}
void ClientSession::CreateRemoteOpenUrlMessageHandler(
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
// RemoteOpenUrlMessageHandler manages its own lifetime and is tied to the
// lifetime of |pipe|. Once |pipe| is closed, this instance will be cleaned
// up.
auto* unowned_handler =
new RemoteOpenUrlMessageHandler(channel_name, std::move(pipe));
remote_open_url_message_handler_ = unowned_handler->GetWeakPtr();
}
void ClientSession::CreateUrlForwarderControlMessageHandler(
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
// UrlForwarderControlMessageHandler manages its own lifetime and is tied to
// the lifetime of |pipe|. Once |pipe| is closed, this instance will be
// cleaned up.
new UrlForwarderControlMessageHandler(
desktop_environment_->CreateUrlForwarderConfigurator(), channel_name,
std::move(pipe));
}
void ClientSession::CreateRemoteWebAuthnMessageHandler(
const std::string& channel_name,
std::unique_ptr<protocol::MessagePipe> pipe) {
// RemoteWebAuthnMessageHandler manages its own lifetime and is tied to the
// lifetime of |pipe|. Once |pipe| is closed, this instance will be cleaned
// up.
auto* unowned_handler =
new RemoteWebAuthnMessageHandler(channel_name, std::move(pipe));
remote_webauthn_message_handler_ = unowned_handler->GetWeakPtr();
}
} // namespace remoting