// media/mojo/mojom/video_encode_accelerator.mojom-blink.cc is auto-generated by mojom_bindings_generator.py; do not edit.
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
#endif
#include "media/mojo/mojom/video_encode_accelerator.mojom-blink.h"
#include <math.h>
#include <stdint.h>
#include <utility>
#include "base/debug/alias.h"
#include "base/hash/md5_constexpr.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/typed_macros.h"
#include "mojo/public/cpp/bindings/lib/generated_code_util.h"
#include "mojo/public/cpp/bindings/lib/message_internal.h"
#include "mojo/public/cpp/bindings/lib/send_message_helper.h"
#include "mojo/public/cpp/bindings/lib/proxy_to_responder.h"
#include "mojo/public/cpp/bindings/lib/serialization_util.h"
#include "mojo/public/cpp/bindings/lib/unserialized_message_context.h"
#include "mojo/public/cpp/bindings/lib/validate_params.h"
#include "mojo/public/cpp/bindings/lib/validation_errors.h"
#include "mojo/public/cpp/bindings/mojo_buildflags.h"
#include "mojo/public/interfaces/bindings/interface_control_messages.mojom.h"
#include "third_party/perfetto/include/perfetto/tracing/traced_value.h"
#include "media/mojo/mojom/video_encode_accelerator.mojom-params-data.h"
#include "media/mojo/mojom/video_encode_accelerator.mojom-shared-message-ids.h"
#include "media/mojo/mojom/video_encode_accelerator.mojom-blink-import-headers.h"
#include "media/mojo/mojom/video_encode_accelerator.mojom-blink-test-utils.h"
#include "mojo/public/cpp/bindings/lib/wtf_serialization.h"
#ifndef MEDIA_MOJO_MOJOM_VIDEO_ENCODE_ACCELERATOR_MOJOM_BLINK_JUMBO_H_
#define MEDIA_MOJO_MOJOM_VIDEO_ENCODE_ACCELERATOR_MOJOM_BLINK_JUMBO_H_
#endif
namespace media {
namespace mojom {
namespace blink {
VideoEncodeAcceleratorSupportedProfile::VideoEncodeAcceleratorSupportedProfile()
: profile(),
min_resolution(),
max_resolution(),
max_framerate_numerator(),
max_framerate_denominator(),
rate_control_modes(),
scalability_modes() {}
VideoEncodeAcceleratorSupportedProfile::VideoEncodeAcceleratorSupportedProfile(
::media::mojom::blink::VideoCodecProfile profile_in,
const ::gfx::Size& min_resolution_in,
const ::gfx::Size& max_resolution_in,
uint32_t max_framerate_numerator_in,
uint32_t max_framerate_denominator_in,
WTF::Vector<VideoEncodeAcceleratorSupportedRateControlMode> rate_control_modes_in,
WTF::Vector<::media::mojom::blink::SVCScalabilityMode> scalability_modes_in)
: profile(std::move(profile_in)),
min_resolution(std::move(min_resolution_in)),
max_resolution(std::move(max_resolution_in)),
max_framerate_numerator(std::move(max_framerate_numerator_in)),
max_framerate_denominator(std::move(max_framerate_denominator_in)),
rate_control_modes(std::move(rate_control_modes_in)),
scalability_modes(std::move(scalability_modes_in)) {}
VideoEncodeAcceleratorSupportedProfile::~VideoEncodeAcceleratorSupportedProfile() = default;
void VideoEncodeAcceleratorSupportedProfile::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"profile"), this->profile,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type ::media::mojom::blink::VideoCodecProfile>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"min_resolution"), this->min_resolution,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const ::gfx::Size&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"max_resolution"), this->max_resolution,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const ::gfx::Size&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"max_framerate_numerator"), this->max_framerate_numerator,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"max_framerate_denominator"), this->max_framerate_denominator,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"rate_control_modes"), this->rate_control_modes,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<VideoEncodeAcceleratorSupportedRateControlMode>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"scalability_modes"), this->scalability_modes,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<::media::mojom::blink::SVCScalabilityMode>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool VideoEncodeAcceleratorSupportedProfile::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
VariableBitratePeak::VariableBitratePeak()
: bps() {}
VariableBitratePeak::VariableBitratePeak(
uint32_t bps_in)
: bps(std::move(bps_in)) {}
VariableBitratePeak::~VariableBitratePeak() = default;
size_t VariableBitratePeak::Hash(size_t seed) const {
seed = mojo::internal::WTFHash(seed, this->bps);
return seed;
}
void VariableBitratePeak::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"bps"), this->bps,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool VariableBitratePeak::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
VideoBitrateAllocation::VideoBitrateAllocation()
: bitrates(),
variable_bitrate_peak() {}
VideoBitrateAllocation::VideoBitrateAllocation(
WTF::Vector<uint32_t> bitrates_in,
VariableBitratePeakPtr variable_bitrate_peak_in)
: bitrates(std::move(bitrates_in)),
variable_bitrate_peak(std::move(variable_bitrate_peak_in)) {}
VideoBitrateAllocation::~VideoBitrateAllocation() = default;
void VideoBitrateAllocation::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"bitrates"), this->bitrates,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<uint32_t>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"variable_bitrate_peak"), this->variable_bitrate_peak,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type VariableBitratePeakPtr>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool VideoBitrateAllocation::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
SpatialLayer::SpatialLayer()
: width(),
height(),
bitrate_bps(),
framerate(),
max_qp(),
num_of_temporal_layers() {}
SpatialLayer::SpatialLayer(
int32_t width_in,
int32_t height_in,
uint32_t bitrate_bps_in,
uint32_t framerate_in,
uint8_t max_qp_in,
uint8_t num_of_temporal_layers_in)
: width(std::move(width_in)),
height(std::move(height_in)),
bitrate_bps(std::move(bitrate_bps_in)),
framerate(std::move(framerate_in)),
max_qp(std::move(max_qp_in)),
num_of_temporal_layers(std::move(num_of_temporal_layers_in)) {}
SpatialLayer::~SpatialLayer() = default;
size_t SpatialLayer::Hash(size_t seed) const {
seed = mojo::internal::WTFHash(seed, this->width);
seed = mojo::internal::WTFHash(seed, this->height);
seed = mojo::internal::WTFHash(seed, this->bitrate_bps);
seed = mojo::internal::WTFHash(seed, this->framerate);
seed = mojo::internal::WTFHash(seed, this->max_qp);
seed = mojo::internal::WTFHash(seed, this->num_of_temporal_layers);
return seed;
}
void SpatialLayer::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"width"), this->width,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type int32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"height"), this->height,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type int32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"bitrate_bps"), this->bitrate_bps,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"framerate"), this->framerate,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"max_qp"), this->max_qp,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"num_of_temporal_layers"), this->num_of_temporal_layers,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool SpatialLayer::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
ConstantBitrate::ConstantBitrate()
: target_bps(0U) {}
ConstantBitrate::ConstantBitrate(
uint32_t target_bps_in)
: target_bps(std::move(target_bps_in)) {}
ConstantBitrate::~ConstantBitrate() = default;
size_t ConstantBitrate::Hash(size_t seed) const {
seed = mojo::internal::WTFHash(seed, this->target_bps);
return seed;
}
void ConstantBitrate::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"target_bps"), this->target_bps,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool ConstantBitrate::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
VariableBitrate::VariableBitrate()
: target_bps(0U),
peak_bps() {}
VariableBitrate::VariableBitrate(
uint32_t target_bps_in,
uint32_t peak_bps_in)
: target_bps(std::move(target_bps_in)),
peak_bps(std::move(peak_bps_in)) {}
VariableBitrate::~VariableBitrate() = default;
size_t VariableBitrate::Hash(size_t seed) const {
seed = mojo::internal::WTFHash(seed, this->target_bps);
seed = mojo::internal::WTFHash(seed, this->peak_bps);
return seed;
}
void VariableBitrate::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"target_bps"), this->target_bps,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"peak_bps"), this->peak_bps,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool VariableBitrate::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
VideoEncodeAcceleratorConfig::VideoEncodeAcceleratorConfig()
: input_format(),
input_visible_size(),
output_profile(),
bitrate(),
initial_framerate(),
has_initial_framerate(),
gop_length(),
has_gop_length(),
h264_output_level(),
has_h264_output_level(),
is_constrained_h264(),
storage_type(),
has_storage_type(),
content_type(),
spatial_layers(),
inter_layer_pred(),
require_low_delay() {}
VideoEncodeAcceleratorConfig::VideoEncodeAcceleratorConfig(
::media::mojom::blink::VideoPixelFormat input_format_in,
const ::gfx::Size& input_visible_size_in,
::media::mojom::blink::VideoCodecProfile output_profile_in,
BitratePtr bitrate_in,
uint32_t initial_framerate_in,
bool has_initial_framerate_in,
uint32_t gop_length_in,
bool has_gop_length_in,
uint8_t h264_output_level_in,
bool has_h264_output_level_in,
bool is_constrained_h264_in,
VideoEncodeAcceleratorConfig::StorageType storage_type_in,
bool has_storage_type_in,
VideoEncodeAcceleratorConfig::ContentType content_type_in,
WTF::Vector<SpatialLayerPtr> spatial_layers_in,
VideoEncodeAcceleratorConfig::InterLayerPredMode inter_layer_pred_in,
bool require_low_delay_in)
: input_format(std::move(input_format_in)),
input_visible_size(std::move(input_visible_size_in)),
output_profile(std::move(output_profile_in)),
bitrate(std::move(bitrate_in)),
initial_framerate(std::move(initial_framerate_in)),
has_initial_framerate(std::move(has_initial_framerate_in)),
gop_length(std::move(gop_length_in)),
has_gop_length(std::move(has_gop_length_in)),
h264_output_level(std::move(h264_output_level_in)),
has_h264_output_level(std::move(has_h264_output_level_in)),
is_constrained_h264(std::move(is_constrained_h264_in)),
storage_type(std::move(storage_type_in)),
has_storage_type(std::move(has_storage_type_in)),
content_type(std::move(content_type_in)),
spatial_layers(std::move(spatial_layers_in)),
inter_layer_pred(std::move(inter_layer_pred_in)),
require_low_delay(std::move(require_low_delay_in)) {}
VideoEncodeAcceleratorConfig::~VideoEncodeAcceleratorConfig() = default;
void VideoEncodeAcceleratorConfig::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"input_format"), this->input_format,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type ::media::mojom::blink::VideoPixelFormat>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"input_visible_size"), this->input_visible_size,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const ::gfx::Size&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"output_profile"), this->output_profile,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type ::media::mojom::blink::VideoCodecProfile>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"bitrate"), this->bitrate,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type BitratePtr>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"initial_framerate"), this->initial_framerate,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"has_initial_framerate"), this->has_initial_framerate,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"gop_length"), this->gop_length,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"has_gop_length"), this->has_gop_length,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"h264_output_level"), this->h264_output_level,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"has_h264_output_level"), this->has_h264_output_level,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"is_constrained_h264"), this->is_constrained_h264,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"storage_type"), this->storage_type,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type VideoEncodeAcceleratorConfig::StorageType>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"has_storage_type"), this->has_storage_type,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"content_type"), this->content_type,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type VideoEncodeAcceleratorConfig::ContentType>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"spatial_layers"), this->spatial_layers,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type WTF::Vector<SpatialLayerPtr>>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"inter_layer_pred"), this->inter_layer_pred,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type VideoEncodeAcceleratorConfig::InterLayerPredMode>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"require_low_delay"), this->require_low_delay,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool VideoEncodeAcceleratorConfig::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
H264Metadata::H264Metadata()
: temporal_idx(),
layer_sync() {}
H264Metadata::H264Metadata(
uint8_t temporal_idx_in,
bool layer_sync_in)
: temporal_idx(std::move(temporal_idx_in)),
layer_sync(std::move(layer_sync_in)) {}
H264Metadata::~H264Metadata() = default;
size_t H264Metadata::Hash(size_t seed) const {
seed = mojo::internal::WTFHash(seed, this->temporal_idx);
seed = mojo::internal::WTFHash(seed, this->layer_sync);
return seed;
}
void H264Metadata::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"temporal_idx"), this->temporal_idx,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"layer_sync"), this->layer_sync,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool H264Metadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
Vp8Metadata::Vp8Metadata()
: non_reference(),
temporal_idx(),
layer_sync() {}
Vp8Metadata::Vp8Metadata(
bool non_reference_in,
uint8_t temporal_idx_in,
bool layer_sync_in)
: non_reference(std::move(non_reference_in)),
temporal_idx(std::move(temporal_idx_in)),
layer_sync(std::move(layer_sync_in)) {}
Vp8Metadata::~Vp8Metadata() = default;
size_t Vp8Metadata::Hash(size_t seed) const {
seed = mojo::internal::WTFHash(seed, this->non_reference);
seed = mojo::internal::WTFHash(seed, this->temporal_idx);
seed = mojo::internal::WTFHash(seed, this->layer_sync);
return seed;
}
void Vp8Metadata::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"non_reference"), this->non_reference,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"temporal_idx"), this->temporal_idx,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"layer_sync"), this->layer_sync,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool Vp8Metadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
Vp9Metadata::Vp9Metadata()
: inter_pic_predicted(),
temporal_up_switch(),
referenced_by_upper_spatial_layers(),
reference_lower_spatial_layers(),
end_of_picture(),
temporal_idx(),
spatial_idx(),
spatial_layer_resolutions(),
p_diffs() {}
Vp9Metadata::Vp9Metadata(
bool inter_pic_predicted_in,
bool temporal_up_switch_in,
bool referenced_by_upper_spatial_layers_in,
bool reference_lower_spatial_layers_in,
bool end_of_picture_in,
uint8_t temporal_idx_in,
uint8_t spatial_idx_in,
WTF::Vector<::gfx::Size> spatial_layer_resolutions_in,
WTF::Vector<uint8_t> p_diffs_in)
: inter_pic_predicted(std::move(inter_pic_predicted_in)),
temporal_up_switch(std::move(temporal_up_switch_in)),
referenced_by_upper_spatial_layers(std::move(referenced_by_upper_spatial_layers_in)),
reference_lower_spatial_layers(std::move(reference_lower_spatial_layers_in)),
end_of_picture(std::move(end_of_picture_in)),
temporal_idx(std::move(temporal_idx_in)),
spatial_idx(std::move(spatial_idx_in)),
spatial_layer_resolutions(std::move(spatial_layer_resolutions_in)),
p_diffs(std::move(p_diffs_in)) {}
Vp9Metadata::~Vp9Metadata() = default;
void Vp9Metadata::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"inter_pic_predicted"), this->inter_pic_predicted,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"temporal_up_switch"), this->temporal_up_switch,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"referenced_by_upper_spatial_layers"), this->referenced_by_upper_spatial_layers,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"reference_lower_spatial_layers"), this->reference_lower_spatial_layers,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"end_of_picture"), this->end_of_picture,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"temporal_idx"), this->temporal_idx,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"spatial_idx"), this->spatial_idx,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"spatial_layer_resolutions"), this->spatial_layer_resolutions,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<::gfx::Size>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"p_diffs"), this->p_diffs,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<uint8_t>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool Vp9Metadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
Av1Metadata::Av1Metadata()
: inter_pic_predicted(),
switch_frame(),
end_of_picture(),
temporal_idx(),
spatial_idx(),
spatial_layer_resolutions(),
f_diffs() {}
Av1Metadata::Av1Metadata(
bool inter_pic_predicted_in,
bool switch_frame_in,
bool end_of_picture_in,
uint8_t temporal_idx_in,
uint8_t spatial_idx_in,
WTF::Vector<::gfx::Size> spatial_layer_resolutions_in,
WTF::Vector<uint8_t> f_diffs_in)
: inter_pic_predicted(std::move(inter_pic_predicted_in)),
switch_frame(std::move(switch_frame_in)),
end_of_picture(std::move(end_of_picture_in)),
temporal_idx(std::move(temporal_idx_in)),
spatial_idx(std::move(spatial_idx_in)),
spatial_layer_resolutions(std::move(spatial_layer_resolutions_in)),
f_diffs(std::move(f_diffs_in)) {}
Av1Metadata::~Av1Metadata() = default;
void Av1Metadata::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"inter_pic_predicted"), this->inter_pic_predicted,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"switch_frame"), this->switch_frame,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"end_of_picture"), this->end_of_picture,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"temporal_idx"), this->temporal_idx,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"spatial_idx"), this->spatial_idx,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint8_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"spatial_layer_resolutions"), this->spatial_layer_resolutions,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<::gfx::Size>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"f_diffs"), this->f_diffs,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type const WTF::Vector<uint8_t>&>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool Av1Metadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
BitstreamBufferMetadata::BitstreamBufferMetadata()
: payload_size_bytes(),
key_frame(),
timestamp(),
qp(),
codec_metadata() {}
BitstreamBufferMetadata::BitstreamBufferMetadata(
uint32_t payload_size_bytes_in,
bool key_frame_in,
::base::TimeDelta timestamp_in,
int32_t qp_in,
CodecMetadataPtr codec_metadata_in)
: payload_size_bytes(std::move(payload_size_bytes_in)),
key_frame(std::move(key_frame_in)),
timestamp(std::move(timestamp_in)),
qp(std::move(qp_in)),
codec_metadata(std::move(codec_metadata_in)) {}
BitstreamBufferMetadata::~BitstreamBufferMetadata() = default;
void BitstreamBufferMetadata::WriteIntoTrace(
perfetto::TracedValue traced_context) const {
[[maybe_unused]] auto dict = std::move(traced_context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"payload_size_bytes"), this->payload_size_bytes,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type uint32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"key_frame"), this->key_frame,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type bool>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"timestamp"), this->timestamp,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type ::base::TimeDelta>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"qp"), this->qp,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type int32_t>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem(
"codec_metadata"), this->codec_metadata,
#if BUILDFLAG(MOJO_TRACE_ENABLED)
"<value of type CodecMetadataPtr>"
#else
"<value>"
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
);
}
bool BitstreamBufferMetadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context);
}
Bitrate::Bitrate() : tag_(Tag::kConstant) {
data_.constant = new ConstantBitratePtr;
}
Bitrate::~Bitrate() {
DestroyActive();
}
void Bitrate::set_constant(
ConstantBitratePtr constant) {
if (tag_ == Tag::kConstant) {
*(data_.constant) = std::move(constant);
} else {
DestroyActive();
tag_ = Tag::kConstant;
data_.constant = new ConstantBitratePtr(
std::move(constant));
}
}
void Bitrate::set_variable(
VariableBitratePtr variable) {
if (tag_ == Tag::kVariable) {
*(data_.variable) = std::move(variable);
} else {
DestroyActive();
tag_ = Tag::kVariable;
data_.variable = new VariableBitratePtr(
std::move(variable));
}
}
void Bitrate::DestroyActive() {
switch (tag_) {
case Tag::kConstant:
delete data_.constant;
break;
case Tag::kVariable:
delete data_.variable;
break;
}
}
size_t Bitrate::Hash(size_t seed) const {
seed = mojo::internal::HashCombine(seed, static_cast<uint32_t>(tag_));
switch (tag_) {
case Tag::kConstant:
return mojo::internal::WTFHash(seed, data_.constant);
case Tag::kVariable:
return mojo::internal::WTFHash(seed, data_.variable);
default:
NOTREACHED();
return seed;
}
}
bool Bitrate::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context, false);
}
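// Illustrative sketch only, not emitted by mojom_bindings_generator.py: shows
// how callers drive the tagged Bitrate union above. It assumes the New()/
// NewConstant() factory helpers declared in the generated -blink.h header;
// CodecMetadata below manages its members the same way.
[[maybe_unused]] static void ExampleBitrateUnionUsage() {
  // Start as a constant-bitrate configuration of 2 Mbps; the union defaults
  // to Tag::kConstant (see Bitrate::Bitrate() above).
  BitratePtr bitrate = Bitrate::NewConstant(ConstantBitrate::New(2000000u));
  // Switching members destroys the previously active one first
  // (Bitrate::set_variable() calls DestroyActive() before re-pointing data_).
  bitrate->set_variable(VariableBitrate::New(1000000u, 3000000u));
}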
CodecMetadata::CodecMetadata() : tag_(Tag::kH264) {
data_.h264 = new H264MetadataPtr;
}
CodecMetadata::~CodecMetadata() {
DestroyActive();
}
void CodecMetadata::set_h264(
H264MetadataPtr h264) {
if (tag_ == Tag::kH264) {
*(data_.h264) = std::move(h264);
} else {
DestroyActive();
tag_ = Tag::kH264;
data_.h264 = new H264MetadataPtr(
std::move(h264));
}
}
void CodecMetadata::set_vp8(
Vp8MetadataPtr vp8) {
if (tag_ == Tag::kVp8) {
*(data_.vp8) = std::move(vp8);
} else {
DestroyActive();
tag_ = Tag::kVp8;
data_.vp8 = new Vp8MetadataPtr(
std::move(vp8));
}
}
void CodecMetadata::set_vp9(
Vp9MetadataPtr vp9) {
if (tag_ == Tag::kVp9) {
*(data_.vp9) = std::move(vp9);
} else {
DestroyActive();
tag_ = Tag::kVp9;
data_.vp9 = new Vp9MetadataPtr(
std::move(vp9));
}
}
void CodecMetadata::set_av1(
Av1MetadataPtr av1) {
if (tag_ == Tag::kAv1) {
*(data_.av1) = std::move(av1);
} else {
DestroyActive();
tag_ = Tag::kAv1;
data_.av1 = new Av1MetadataPtr(
std::move(av1));
}
}
void CodecMetadata::DestroyActive() {
switch (tag_) {
case Tag::kH264:
delete data_.h264;
break;
case Tag::kVp8:
delete data_.vp8;
break;
case Tag::kVp9:
delete data_.vp9;
break;
case Tag::kAv1:
delete data_.av1;
break;
}
}
bool CodecMetadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) {
return Data_::Validate(data, validation_context, false);
}
const char VideoEncodeAcceleratorProvider::Name_[] = "media.mojom.VideoEncodeAcceleratorProvider";
std::pair<uint32_t, const void*> VideoEncodeAcceleratorProvider::MessageToMethodInfo_(mojo::Message& message) {
switch (message.name()) {
case internal::kVideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAcceleratorProvider::CreateVideoEncodeAccelerator");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAcceleratorProvider::CreateVideoEncodeAccelerator_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
}
return std::make_pair(0, nullptr);
}
const char* VideoEncodeAcceleratorProvider::MessageToMethodName_(mojo::Message& message) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
bool is_response = message.has_flag(mojo::Message::kFlagIsResponse);
if (!is_response) {
switch (message.name()) {
case internal::kVideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Name:
return "Receive media::mojom::VideoEncodeAcceleratorProvider::CreateVideoEncodeAccelerator";
case internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name:
return "Receive media::mojom::VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles";
}
} else {
switch (message.name()) {
case internal::kVideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Name:
return "Receive reply media::mojom::VideoEncodeAcceleratorProvider::CreateVideoEncodeAccelerator";
case internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name:
return "Receive reply media::mojom::VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles";
}
}
return "Receive unknown mojo message";
#else
bool is_response = message.has_flag(mojo::Message::kFlagIsResponse);
if (is_response) {
return "Receive mojo reply";
} else {
return "Receive mojo message";
}
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
}
#if !BUILDFLAG(IS_FUCHSIA)
void VideoEncodeAcceleratorProvider::CreateVideoEncodeAccelerator_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
#endif // !BUILDFLAG(IS_FUCHSIA)
class VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback
: public mojo::MessageReceiver {
public:
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback(
VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfilesCallback callback
) : callback_(std::move(callback)) {
}
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback(const VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback&) = delete;
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback& operator=(const VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback&) = delete;
bool Accept(mojo::Message* message) override;
private:
VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfilesCallback callback_;
};
VideoEncodeAcceleratorProviderProxy::VideoEncodeAcceleratorProviderProxy(mojo::MessageReceiverWithResponder* receiver)
: receiver_(receiver) {
}
void VideoEncodeAcceleratorProviderProxy::CreateVideoEncodeAccelerator(
::mojo::PendingReceiver<VideoEncodeAccelerator> in_receiver) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAcceleratorProvider::CreateVideoEncodeAccelerator", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("receiver"), in_receiver,
"<value of type ::mojo::PendingReceiver<VideoEncodeAccelerator>>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Params_Data> params(
message);
params.Allocate();
mojo::internal::Serialize<mojo::InterfaceRequestDataView<::media::mojom::VideoEncodeAcceleratorInterfaceBase>>(
in_receiver, &params->receiver, &params.message());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
!mojo::internal::IsHandleOrInterfaceValid(params->receiver),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_INVALID_HANDLE,
"invalid receiver in VideoEncodeAcceleratorProvider.CreateVideoEncodeAccelerator request");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorProvider::Name_);
message.set_method_name("CreateVideoEncodeAccelerator");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
void VideoEncodeAcceleratorProviderProxy::GetVideoEncodeAcceleratorSupportedProfiles(
GetVideoEncodeAcceleratorSupportedProfilesCallback callback) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT0("mojom", "Send media::mojom::VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles");
#endif
const bool kExpectsResponse = true;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Params_Data> params(
message);
params.Allocate();
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorProvider::Name_);
message.set_method_name("GetVideoEncodeAcceleratorSupportedProfiles");
#endif
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback(
std::move(callback)));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
}
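// Illustrative sketch only, not emitted by mojom_bindings_generator.py: the
// shape of the response callback a caller hands to
// GetVideoEncodeAcceleratorSupportedProfiles() above. It assumes the
// GetVideoEncodeAcceleratorSupportedProfilesCallback alias from the generated
// -blink.h header, i.e. a base::OnceCallback taking the profiles vector.
[[maybe_unused]] static VideoEncodeAcceleratorProvider::
    GetVideoEncodeAcceleratorSupportedProfilesCallback
ExampleSupportedProfilesCallback() {
  return base::BindOnce(
      [](WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr> profiles) {
        // A real caller would inspect |profiles| here, e.g. each entry's
        // profile, min_resolution and max_resolution fields.
      });
}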
class VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder : public ::mojo::internal::ProxyToResponder {
public:
static VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfilesCallback CreateCallback(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
std::unique_ptr<VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder> proxy(
new VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder(
message, std::move(responder)));
return base::BindOnce(&VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder::Run,
std::move(proxy));
}
~VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder() {
#if DCHECK_IS_ON()
if (responder_) {
// If we're being destroyed without being run, we want to ensure the
// binding endpoint has been closed. This checks for that asynchronously.
// We pass a bound generated callback to handle the response so that any
// resulting DCHECK stack will have useful interface type information.
responder_->IsConnectedAsync(base::BindOnce(&OnIsConnectedComplete));
}
#endif
}
private:
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder)
: ::mojo::internal::ProxyToResponder(message, std::move(responder)) {
}
#if DCHECK_IS_ON()
static void OnIsConnectedComplete(bool connected) {
DCHECK(!connected)
<< "VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfilesCallback was destroyed without "
<< "first either being run or its corresponding binding being closed. "
<< "It is an error to drop response callbacks which still correspond "
<< "to an open interface pipe.";
}
#endif
void Run(
WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr> in_profiles);
};
bool VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ForwardToCallback::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ResponseParams_Data* params =
reinterpret_cast<
internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr> p_profiles{};
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ResponseParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadProfiles(&p_profiles))
success = false;
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorProvider::Name_, 1, true);
return false;
}
if (!callback_.is_null())
std::move(callback_).Run(
std::move(p_profiles));
return true;
}
void VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder::Run(
WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr> in_profiles) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send reply media::mojom::VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfiles", "async_response_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("profiles"), in_profiles,
"<value of type WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr>>");
});
#endif
const uint32_t kFlags = mojo::Message::kFlagIsResponse |
((is_sync_) ? mojo::Message::kFlagIsSync : 0) |
((true) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ResponseParams_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<
typename decltype(params->profiles)::BaseType>
profiles_fragment(params.message());
const mojo::internal::ContainerValidateParams profiles_validate_params(
0, false, nullptr);
mojo::internal::Serialize<mojo::ArrayDataView<::media::mojom::VideoEncodeAcceleratorSupportedProfileDataView>>(
in_profiles, profiles_fragment, &profiles_validate_params);
params->profiles.Set(
profiles_fragment.is_null() ? nullptr : profiles_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->profiles.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null profiles in ");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorProvider::Name_);
message.set_method_name("GetVideoEncodeAcceleratorSupportedProfiles");
#endif
message.set_request_id(request_id_);
message.set_trace_nonce(trace_nonce_);
::mojo::internal::SendMessage(*responder_, message);
// SendMessage fails silently if the responder connection is closed,
// or if the message is malformed.
//
// TODO(darin): If Accept() returns false due to a malformed message, that
// may be good reason to close the connection. However, we don't have a
// way to do that from here. We should add a way.
responder_ = nullptr;
}
// static
bool VideoEncodeAcceleratorProviderStubDispatch::Accept(
VideoEncodeAcceleratorProvider* impl,
mojo::Message* message) {
switch (message->header()->name) {
case internal::kVideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Params_Data*>(
message->mutable_payload());
bool success = true;
::mojo::PendingReceiver<VideoEncodeAccelerator> p_receiver{};
VideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_ParamsDataView input_data_view(params, message);
if (success) {
p_receiver =
input_data_view.TakeReceiver<decltype(p_receiver)>();
}
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorProvider::Name_, 0, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->CreateVideoEncodeAccelerator(
std::move(p_receiver));
return true;
}
case internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name: {
break;
}
}
return false;
}
// static
bool VideoEncodeAcceleratorProviderStubDispatch::AcceptWithResponder(
VideoEncodeAcceleratorProvider* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
[[maybe_unused]] const bool message_is_sync =
message->has_flag(mojo::Message::kFlagIsSync);
[[maybe_unused]] const uint64_t request_id = message->request_id();
switch (message->header()->name) {
case internal::kVideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Name: {
break;
}
case internal::kVideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Name: {
internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Params_Data* params =
reinterpret_cast<
internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Params_Data*>(
message->mutable_payload());
bool success = true;
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ParamsDataView input_data_view(params, message);
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorProvider::Name_, 1, false);
return false;
}
VideoEncodeAcceleratorProvider::GetVideoEncodeAcceleratorSupportedProfilesCallback callback =
VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ProxyToResponder::CreateCallback(
*message, std::move(responder));
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->GetVideoEncodeAcceleratorSupportedProfiles(std::move(callback));
return true;
}
}
return false;
}
static const mojo::internal::GenericValidationInfo kVideoEncodeAcceleratorProviderValidationInfo[] = {
{&internal::VideoEncodeAcceleratorProvider_CreateVideoEncodeAccelerator_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_Params_Data::Validate,
&internal::VideoEncodeAcceleratorProvider_GetVideoEncodeAcceleratorSupportedProfiles_ResponseParams_Data::Validate},
};
bool VideoEncodeAcceleratorProviderRequestValidator::Accept(mojo::Message* message) {
const char* name = ::media::mojom::blink::VideoEncodeAcceleratorProvider::Name_;
return mojo::internal::ValidateRequestGenericPacked(message, name, kVideoEncodeAcceleratorProviderValidationInfo);
}
bool VideoEncodeAcceleratorProviderResponseValidator::Accept(mojo::Message* message) {
const char* name = ::media::mojom::blink::VideoEncodeAcceleratorProvider::Name_;
return mojo::internal::ValidateResponseGenericPacked(message, name, kVideoEncodeAcceleratorProviderValidationInfo);
}
const char VideoEncodeAccelerator::Name_[] = "media.mojom.VideoEncodeAccelerator";
std::pair<uint32_t, const void*> VideoEncodeAccelerator::MessageToMethodInfo_(mojo::Message& message) {
switch (message.name()) {
case internal::kVideoEncodeAccelerator_Initialize_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::Initialize");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::Initialize_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAccelerator_Encode_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::Encode");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::Encode_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAccelerator_UseOutputBitstreamBuffer_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::UseOutputBitstreamBuffer");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::UseOutputBitstreamBuffer_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithLayers");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::RequestEncodingParametersChangeWithLayers_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithBitrate");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::RequestEncodingParametersChangeWithBitrate_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAccelerator_IsFlushSupported_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::IsFlushSupported");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::IsFlushSupported_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAccelerator_Flush_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAccelerator::Flush");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAccelerator::Flush_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
}
return std::make_pair(0, nullptr);
}
const char* VideoEncodeAccelerator::MessageToMethodName_(mojo::Message& message) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
bool is_response = message.has_flag(mojo::Message::kFlagIsResponse);
if (!is_response) {
switch (message.name()) {
case internal::kVideoEncodeAccelerator_Initialize_Name:
return "Receive media::mojom::VideoEncodeAccelerator::Initialize";
case internal::kVideoEncodeAccelerator_Encode_Name:
return "Receive media::mojom::VideoEncodeAccelerator::Encode";
case internal::kVideoEncodeAccelerator_UseOutputBitstreamBuffer_Name:
return "Receive media::mojom::VideoEncodeAccelerator::UseOutputBitstreamBuffer";
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Name:
return "Receive media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithLayers";
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Name:
return "Receive media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithBitrate";
case internal::kVideoEncodeAccelerator_IsFlushSupported_Name:
return "Receive media::mojom::VideoEncodeAccelerator::IsFlushSupported";
case internal::kVideoEncodeAccelerator_Flush_Name:
return "Receive media::mojom::VideoEncodeAccelerator::Flush";
}
} else {
switch (message.name()) {
case internal::kVideoEncodeAccelerator_Initialize_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::Initialize";
case internal::kVideoEncodeAccelerator_Encode_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::Encode";
case internal::kVideoEncodeAccelerator_UseOutputBitstreamBuffer_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::UseOutputBitstreamBuffer";
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithLayers";
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithBitrate";
case internal::kVideoEncodeAccelerator_IsFlushSupported_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::IsFlushSupported";
case internal::kVideoEncodeAccelerator_Flush_Name:
return "Receive reply media::mojom::VideoEncodeAccelerator::Flush";
}
}
return "Receive unknown mojo message";
#else
bool is_response = message.has_flag(mojo::Message::kFlagIsResponse);
if (is_response) {
return "Receive mojo reply";
} else {
return "Receive mojo message";
}
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
}
#if !BUILDFLAG(IS_FUCHSIA)
void VideoEncodeAccelerator::Initialize_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAccelerator::Encode_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAccelerator::UseOutputBitstreamBuffer_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAccelerator::RequestEncodingParametersChangeWithLayers_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAccelerator::RequestEncodingParametersChangeWithBitrate_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAccelerator::IsFlushSupported_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAccelerator::Flush_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
#endif // !BUILDFLAG(IS_FUCHSIA)
bool VideoEncodeAccelerator::Initialize(VideoEncodeAcceleratorConfigPtr config, ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> client, ::mojo::PendingRemote<::media::mojom::blink::MediaLog> media_log, bool* out_result) {
NOTREACHED();
return false;
}
bool VideoEncodeAccelerator::IsFlushSupported(bool* out_result) {
NOTREACHED();
return false;
}
class VideoEncodeAccelerator_Initialize_HandleSyncResponse
: public mojo::MessageReceiver {
public:
VideoEncodeAccelerator_Initialize_HandleSyncResponse(
bool* result, bool* out_result)
: result_(result), out_result_(out_result) {
DCHECK(!*result_);
}
VideoEncodeAccelerator_Initialize_HandleSyncResponse(const VideoEncodeAccelerator_Initialize_HandleSyncResponse&) = delete;
VideoEncodeAccelerator_Initialize_HandleSyncResponse& operator=(const VideoEncodeAccelerator_Initialize_HandleSyncResponse&) = delete;
bool Accept(mojo::Message* message) override;
private:
bool* result_;
bool* out_result_;
};
class VideoEncodeAccelerator_Initialize_ForwardToCallback
: public mojo::MessageReceiver {
public:
VideoEncodeAccelerator_Initialize_ForwardToCallback(
VideoEncodeAccelerator::InitializeCallback callback
) : callback_(std::move(callback)) {
}
VideoEncodeAccelerator_Initialize_ForwardToCallback(const VideoEncodeAccelerator_Initialize_ForwardToCallback&) = delete;
VideoEncodeAccelerator_Initialize_ForwardToCallback& operator=(const VideoEncodeAccelerator_Initialize_ForwardToCallback&) = delete;
bool Accept(mojo::Message* message) override;
private:
VideoEncodeAccelerator::InitializeCallback callback_;
};
class VideoEncodeAccelerator_Encode_ForwardToCallback
: public mojo::MessageReceiver {
public:
VideoEncodeAccelerator_Encode_ForwardToCallback(
VideoEncodeAccelerator::EncodeCallback callback
) : callback_(std::move(callback)) {
}
VideoEncodeAccelerator_Encode_ForwardToCallback(const VideoEncodeAccelerator_Encode_ForwardToCallback&) = delete;
VideoEncodeAccelerator_Encode_ForwardToCallback& operator=(const VideoEncodeAccelerator_Encode_ForwardToCallback&) = delete;
bool Accept(mojo::Message* message) override;
private:
VideoEncodeAccelerator::EncodeCallback callback_;
};
class VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse
: public mojo::MessageReceiver {
public:
VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse(
bool* result, bool* out_result)
: result_(result), out_result_(out_result) {
DCHECK(!*result_);
}
VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse(const VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse&) = delete;
VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse& operator=(const VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse&) = delete;
bool Accept(mojo::Message* message) override;
private:
bool* result_;
bool* out_result_;
};
class VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback
: public mojo::MessageReceiver {
public:
VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback(
VideoEncodeAccelerator::IsFlushSupportedCallback callback
) : callback_(std::move(callback)) {
}
VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback(const VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback&) = delete;
VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback& operator=(const VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback&) = delete;
bool Accept(mojo::Message* message) override;
private:
VideoEncodeAccelerator::IsFlushSupportedCallback callback_;
};
class VideoEncodeAccelerator_Flush_ForwardToCallback
: public mojo::MessageReceiver {
public:
VideoEncodeAccelerator_Flush_ForwardToCallback(
VideoEncodeAccelerator::FlushCallback callback
) : callback_(std::move(callback)) {
}
VideoEncodeAccelerator_Flush_ForwardToCallback(const VideoEncodeAccelerator_Flush_ForwardToCallback&) = delete;
VideoEncodeAccelerator_Flush_ForwardToCallback& operator=(const VideoEncodeAccelerator_Flush_ForwardToCallback&) = delete;
bool Accept(mojo::Message* message) override;
private:
VideoEncodeAccelerator::FlushCallback callback_;
};
VideoEncodeAcceleratorProxy::VideoEncodeAcceleratorProxy(mojo::MessageReceiverWithResponder* receiver)
: receiver_(receiver) {
}
bool VideoEncodeAcceleratorProxy::Initialize(
VideoEncodeAcceleratorConfigPtr param_config, ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> param_client, ::mojo::PendingRemote<::media::mojom::blink::MediaLog> param_media_log, bool* out_param_result) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT_BEGIN1(
"mojom", "Call media::mojom::VideoEncodeAccelerator::Initialize (sync)", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("config"), param_config,
"<value of type VideoEncodeAcceleratorConfigPtr>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("client"), param_client,
"<value of type ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient>>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("media_log"), param_media_log,
"<value of type ::mojo::PendingRemote<::media::mojom::blink::MediaLog>>");
});
#else
TRACE_EVENT0("mojom", "VideoEncodeAccelerator::Initialize");
#endif
const bool kExpectsResponse = true;
const bool kIsSync = true;
const bool kAllowInterrupt =
true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Initialize_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Initialize_Params_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<
typename decltype(params->config)::BaseType> config_fragment(
params.message());
mojo::internal::Serialize<::media::mojom::VideoEncodeAcceleratorConfigDataView>(
param_config, config_fragment);
params->config.Set(
config_fragment.is_null() ? nullptr : config_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->config.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null config in VideoEncodeAccelerator.Initialize request");
mojo::internal::Serialize<::media::mojom::VideoEncodeAcceleratorClientAssociatedPtrInfoDataView>(
param_client, &params->client, &params.message());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
!mojo::internal::IsHandleOrInterfaceValid(params->client),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_INVALID_INTERFACE_ID,
"invalid client in VideoEncodeAccelerator.Initialize request");
mojo::internal::Serialize<mojo::InterfacePtrDataView<::media::mojom::MediaLogInterfaceBase>>(
param_media_log, &params->media_log, &params.message());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
!mojo::internal::IsHandleOrInterfaceValid(params->media_log),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_INVALID_HANDLE,
"invalid media_log in VideoEncodeAccelerator.Initialize request");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Initialize");
#endif
bool result = false;
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAccelerator_Initialize_HandleSyncResponse(
&result, out_param_result));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT_END1(
"mojom", "VideoEncodeAccelerator::Initialize", "sync_response_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("result"), out_param_result,
"<value of type bool>");
});
#endif
return result;
}
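// Caller-side sketch (editorial, not part of the generated bindings): the
// sync variant above blocks until a reply arrives, reports the payload via
// the out-parameter, and returns whether a valid reply was received at all.
// Assuming a bound mojo::Remote named |remote| and a context where sync mojo
// calls are permitted, usage would look roughly like:
//
//   bool init_ok = false;
//   if (remote->Initialize(std::move(config), std::move(client),
//                          std::move(media_log), &init_ok) &&
//       init_ok) {
//     // Encoder is ready; start feeding frames.
//   }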
void VideoEncodeAcceleratorProxy::Initialize(
VideoEncodeAcceleratorConfigPtr in_config, ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> in_client, ::mojo::PendingRemote<::media::mojom::blink::MediaLog> in_media_log, InitializeCallback callback) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAccelerator::Initialize", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("config"), in_config,
"<value of type VideoEncodeAcceleratorConfigPtr>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("client"), in_client,
"<value of type ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient>>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("media_log"), in_media_log,
"<value of type ::mojo::PendingRemote<::media::mojom::blink::MediaLog>>");
});
#endif
const bool kExpectsResponse = true;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Initialize_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Initialize_Params_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<
typename decltype(params->config)::BaseType> config_fragment(
params.message());
mojo::internal::Serialize<::media::mojom::VideoEncodeAcceleratorConfigDataView>(
in_config, config_fragment);
params->config.Set(
config_fragment.is_null() ? nullptr : config_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->config.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null config in VideoEncodeAccelerator.Initialize request");
mojo::internal::Serialize<::media::mojom::VideoEncodeAcceleratorClientAssociatedPtrInfoDataView>(
in_client, &params->client, &params.message());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
!mojo::internal::IsHandleOrInterfaceValid(params->client),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_INVALID_INTERFACE_ID,
"invalid client in VideoEncodeAccelerator.Initialize request");
mojo::internal::Serialize<mojo::InterfacePtrDataView<::media::mojom::MediaLogInterfaceBase>>(
in_media_log, &params->media_log, &params.message());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
!mojo::internal::IsHandleOrInterfaceValid(params->media_log),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_INVALID_HANDLE,
"invalid media_log in VideoEncodeAccelerator.Initialize request");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Initialize");
#endif
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAccelerator_Initialize_ForwardToCallback(
std::move(callback)));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
}
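// Caller-side sketch (editorial, assumed): the asynchronous variant above
// delivers the result through InitializeCallback instead of blocking. With a
// bound mojo::Remote named |remote| and an illustrative handler
// OnInitialized(bool success):
//
//   remote->Initialize(std::move(config), std::move(client),
//                      std::move(media_log),
//                      base::BindOnce(&OnInitialized));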
void VideoEncodeAcceleratorProxy::Encode(
::media::mojom::blink::VideoFramePtr in_frame, bool in_force_keyframe, EncodeCallback callback) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAccelerator::Encode", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("frame"), in_frame,
"<value of type ::media::mojom::blink::VideoFramePtr>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("force_keyframe"), in_force_keyframe,
"<value of type bool>");
});
#endif
const bool kExpectsResponse = true;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Encode_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Encode_Params_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<
typename decltype(params->frame)::BaseType> frame_fragment(
params.message());
mojo::internal::Serialize<::media::mojom::VideoFrameDataView>(
in_frame, frame_fragment);
params->frame.Set(
frame_fragment.is_null() ? nullptr : frame_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->frame.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null frame in VideoEncodeAccelerator.Encode request");
params->force_keyframe = in_force_keyframe;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Encode");
#endif
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAccelerator_Encode_ForwardToCallback(
std::move(callback)));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
}
void VideoEncodeAcceleratorProxy::UseOutputBitstreamBuffer(
int32_t in_bitstream_buffer_id, ::base::UnsafeSharedMemoryRegion in_region) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAccelerator::UseOutputBitstreamBuffer", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("bitstream_buffer_id"), in_bitstream_buffer_id,
"<value of type int32_t>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("region"), in_region,
"<value of type ::base::UnsafeSharedMemoryRegion>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_UseOutputBitstreamBuffer_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_UseOutputBitstreamBuffer_Params_Data> params(
message);
params.Allocate();
params->bitstream_buffer_id = in_bitstream_buffer_id;
mojo::internal::MessageFragment<
typename decltype(params->region)::BaseType> region_fragment(
params.message());
mojo::internal::Serialize<::mojo_base::mojom::UnsafeSharedMemoryRegionDataView>(
in_region, region_fragment);
params->region.Set(
region_fragment.is_null() ? nullptr : region_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->region.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null region in VideoEncodeAccelerator.UseOutputBitstreamBuffer request");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("UseOutputBitstreamBuffer");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
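// Caller-side sketch (editorial, assumed): UseOutputBitstreamBuffer expects
// no reply, so the send is fire-and-forget and delivery failures surface only
// through the connection's disconnect handling. With a bound |remote| and an
// illustrative |output_buffer_size|:
//
//   auto region = base::UnsafeSharedMemoryRegion::Create(output_buffer_size);
//   remote->UseOutputBitstreamBuffer(/*bitstream_buffer_id=*/0,
//                                    std::move(region));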
void VideoEncodeAcceleratorProxy::RequestEncodingParametersChangeWithLayers(
VideoBitrateAllocationPtr in_bitrate_allocation, uint32_t in_framerate) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithLayers", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("bitrate_allocation"), in_bitrate_allocation,
"<value of type VideoBitrateAllocationPtr>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("framerate"), in_framerate,
"<value of type uint32_t>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Params_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<
typename decltype(params->bitrate_allocation)::BaseType> bitrate_allocation_fragment(
params.message());
mojo::internal::Serialize<::media::mojom::VideoBitrateAllocationDataView>(
in_bitrate_allocation, bitrate_allocation_fragment);
params->bitrate_allocation.Set(
bitrate_allocation_fragment.is_null() ? nullptr : bitrate_allocation_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->bitrate_allocation.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null bitrate_allocation in VideoEncodeAccelerator.RequestEncodingParametersChangeWithLayers request");
params->framerate = in_framerate;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("RequestEncodingParametersChangeWithLayers");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
void VideoEncodeAcceleratorProxy::RequestEncodingParametersChangeWithBitrate(
BitratePtr in_bitrate, uint32_t in_framerate) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAccelerator::RequestEncodingParametersChangeWithBitrate", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("bitrate"), in_bitrate,
"<value of type BitratePtr>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("framerate"), in_framerate,
"<value of type uint32_t>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Params_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<decltype(params->bitrate)>
bitrate_fragment(params.message());
bitrate_fragment.Claim(&params->bitrate);
mojo::internal::Serialize<::media::mojom::BitrateDataView>(
in_bitrate, bitrate_fragment, true);
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->bitrate.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null bitrate in VideoEncodeAccelerator.RequestEncodingParametersChangeWithBitrate request");
params->framerate = in_framerate;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("RequestEncodingParametersChangeWithBitrate");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
bool VideoEncodeAcceleratorProxy::IsFlushSupported(
bool* out_param_result) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT_BEGIN0("mojom", "Call media::mojom::VideoEncodeAccelerator::IsFlushSupported (sync)");
#else
TRACE_EVENT0("mojom", "VideoEncodeAccelerator::IsFlushSupported");
#endif
const bool kExpectsResponse = true;
const bool kIsSync = true;
const bool kAllowInterrupt =
true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_IsFlushSupported_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_IsFlushSupported_Params_Data> params(
message);
params.Allocate();
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("IsFlushSupported");
#endif
bool result = false;
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse(
&result, out_param_result));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT_END1(
"mojom", "VideoEncodeAccelerator::IsFlushSupported", "sync_response_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("result"), out_param_result,
"<value of type bool>");
});
#endif
return result;
}
void VideoEncodeAcceleratorProxy::IsFlushSupported(
IsFlushSupportedCallback callback) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT0("mojom", "Send media::mojom::VideoEncodeAccelerator::IsFlushSupported");
#endif
const bool kExpectsResponse = true;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_IsFlushSupported_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_IsFlushSupported_Params_Data> params(
message);
params.Allocate();
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("IsFlushSupported");
#endif
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback(
std::move(callback)));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
}
void VideoEncodeAcceleratorProxy::Flush(
FlushCallback callback) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT0("mojom", "Send media::mojom::VideoEncodeAccelerator::Flush");
#endif
const bool kExpectsResponse = true;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Flush_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Flush_Params_Data> params(
message);
params.Allocate();
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Flush");
#endif
std::unique_ptr<mojo::MessageReceiver> responder(
new VideoEncodeAccelerator_Flush_ForwardToCallback(
std::move(callback)));
::mojo::internal::SendMessage(*receiver_, message, std::move(responder));
}
class VideoEncodeAccelerator_Initialize_ProxyToResponder : public ::mojo::internal::ProxyToResponder {
public:
static VideoEncodeAccelerator::InitializeCallback CreateCallback(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
std::unique_ptr<VideoEncodeAccelerator_Initialize_ProxyToResponder> proxy(
new VideoEncodeAccelerator_Initialize_ProxyToResponder(
message, std::move(responder)));
return base::BindOnce(&VideoEncodeAccelerator_Initialize_ProxyToResponder::Run,
std::move(proxy));
}
~VideoEncodeAccelerator_Initialize_ProxyToResponder() {
#if DCHECK_IS_ON()
if (responder_) {
// If we're being destroyed without being run, we want to ensure the
// binding endpoint has been closed. This checks for that asynchronously.
// We pass a bound generated callback to handle the response so that any
// resulting DCHECK stack will have useful interface type information.
responder_->IsConnectedAsync(base::BindOnce(&OnIsConnectedComplete));
}
#endif
}
private:
VideoEncodeAccelerator_Initialize_ProxyToResponder(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder)
: ::mojo::internal::ProxyToResponder(message, std::move(responder)) {
}
#if DCHECK_IS_ON()
static void OnIsConnectedComplete(bool connected) {
DCHECK(!connected)
<< "VideoEncodeAccelerator::InitializeCallback was destroyed without "
<< "first either being run or its corresponding binding being closed. "
<< "It is an error to drop response callbacks which still correspond "
<< "to an open interface pipe.";
}
#endif
void Run(
bool in_result);
};
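// Implementation-side sketch (editorial, assumed): the responder class above
// owns the pending reply, and its destructor DCHECKs if the callback handed
// to the implementation is dropped while the binding is still open. An
// implementation must therefore run the callback or close its receiver, e.g.
// with an illustrative MyEncoder class implementing this interface:
//
//   void MyEncoder::Initialize(
//       VideoEncodeAcceleratorConfigPtr config,
//       ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> client,
//       ::mojo::PendingRemote<::media::mojom::blink::MediaLog> media_log,
//       InitializeCallback callback) {
//     std::move(callback).Run(/*result=*/true);  // Report the real outcome.
//   }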
bool VideoEncodeAccelerator_Initialize_ForwardToCallback::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_Initialize_ResponseParams_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_Initialize_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
bool p_result{};
VideoEncodeAccelerator_Initialize_ResponseParamsDataView input_data_view(params, message);
if (success)
p_result = input_data_view.result();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 0, true);
return false;
}
if (!callback_.is_null())
std::move(callback_).Run(
std::move(p_result));
return true;
}
void VideoEncodeAccelerator_Initialize_ProxyToResponder::Run(
bool in_result) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send reply media::mojom::VideoEncodeAccelerator::Initialize", "async_response_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("result"), in_result,
"<value of type bool>");
});
#endif
const uint32_t kFlags = mojo::Message::kFlagIsResponse |
((is_sync_) ? mojo::Message::kFlagIsSync : 0) |
((true) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Initialize_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Initialize_ResponseParams_Data> params(
message);
params.Allocate();
params->result = in_result;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Initialize");
#endif
message.set_request_id(request_id_);
message.set_trace_nonce(trace_nonce_);
::mojo::internal::SendMessage(*responder_, message);
// SendMessage fails silently if the responder connection is closed,
// or if the message is malformed.
//
// TODO(darin): If Accept() returns false due to a malformed message, that
// may be good reason to close the connection. However, we don't have a
// way to do that from here. We should add a way.
responder_ = nullptr;
}
bool VideoEncodeAccelerator_Initialize_HandleSyncResponse::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_Initialize_ResponseParams_Data* params =
reinterpret_cast<internal::VideoEncodeAccelerator_Initialize_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
bool p_result{};
VideoEncodeAccelerator_Initialize_ResponseParamsDataView input_data_view(params, message);
if (success)
p_result = input_data_view.result();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 0, true);
return false;
}
*out_result_ = std::move(p_result);
*result_ = true;
return true;
}
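// Note (editorial): in the sync Accept() above, |*out_result_| receives the
// deserialized reply payload and |*result_| records that a well-formed reply
// was seen; the blocking proxy call returns the latter and exposes the former
// through its out-parameter.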
class VideoEncodeAccelerator_Encode_ProxyToResponder : public ::mojo::internal::ProxyToResponder {
public:
static VideoEncodeAccelerator::EncodeCallback CreateCallback(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
std::unique_ptr<VideoEncodeAccelerator_Encode_ProxyToResponder> proxy(
new VideoEncodeAccelerator_Encode_ProxyToResponder(
message, std::move(responder)));
return base::BindOnce(&VideoEncodeAccelerator_Encode_ProxyToResponder::Run,
std::move(proxy));
}
~VideoEncodeAccelerator_Encode_ProxyToResponder() {
#if DCHECK_IS_ON()
if (responder_) {
// If we're being destroyed without being run, we want to ensure the
// binding endpoint has been closed. This checks for that asynchronously.
// We pass a bound generated callback to handle the response so that any
// resulting DCHECK stack will have useful interface type information.
responder_->IsConnectedAsync(base::BindOnce(&OnIsConnectedComplete));
}
#endif
}
private:
VideoEncodeAccelerator_Encode_ProxyToResponder(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder)
: ::mojo::internal::ProxyToResponder(message, std::move(responder)) {
}
#if DCHECK_IS_ON()
static void OnIsConnectedComplete(bool connected) {
DCHECK(!connected)
<< "VideoEncodeAccelerator::EncodeCallback was destroyed without "
<< "first either being run or its corresponding binding being closed. "
<< "It is an error to drop response callbacks which still correspond "
<< "to an open interface pipe.";
}
#endif
void Run(
);
};
bool VideoEncodeAccelerator_Encode_ForwardToCallback::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_Encode_ResponseParams_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_Encode_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
VideoEncodeAccelerator_Encode_ResponseParamsDataView input_data_view(params, message);
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 1, true);
return false;
}
if (!callback_.is_null())
std::move(callback_).Run();
return true;
}
void VideoEncodeAccelerator_Encode_ProxyToResponder::Run(
) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT0("mojom", "Send reply media::mojom::VideoEncodeAccelerator::Encode");
#endif
const uint32_t kFlags = mojo::Message::kFlagIsResponse |
((is_sync_) ? mojo::Message::kFlagIsSync : 0) |
((true) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Encode_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Encode_ResponseParams_Data> params(
message);
params.Allocate();
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Encode");
#endif
message.set_request_id(request_id_);
message.set_trace_nonce(trace_nonce_);
::mojo::internal::SendMessage(*responder_, message);
// SendMessage fails silently if the responder connection is closed,
// or if the message is malformed.
//
// TODO(darin): If Accept() returns false due to a malformed message, that
// may be good reason to close the connection. However, we don't have a
// way to do that from here. We should add a way.
responder_ = nullptr;
}
class VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder : public ::mojo::internal::ProxyToResponder {
public:
static VideoEncodeAccelerator::IsFlushSupportedCallback CreateCallback(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
std::unique_ptr<VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder> proxy(
new VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder(
message, std::move(responder)));
return base::BindOnce(&VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder::Run,
std::move(proxy));
}
~VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder() {
#if DCHECK_IS_ON()
if (responder_) {
// If we're being destroyed without being run, we want to ensure the
// binding endpoint has been closed. This checks for that asynchronously.
// We pass a bound generated callback to handle the response so that any
// resulting DCHECK stack will have useful interface type information.
responder_->IsConnectedAsync(base::BindOnce(&OnIsConnectedComplete));
}
#endif
}
private:
VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder)
: ::mojo::internal::ProxyToResponder(message, std::move(responder)) {
}
#if DCHECK_IS_ON()
static void OnIsConnectedComplete(bool connected) {
DCHECK(!connected)
<< "VideoEncodeAccelerator::IsFlushSupportedCallback was destroyed without "
<< "first either being run or its corresponding binding being closed. "
<< "It is an error to drop response callbacks which still correspond "
<< "to an open interface pipe.";
}
#endif
void Run(
bool in_result);
};
bool VideoEncodeAccelerator_IsFlushSupported_ForwardToCallback::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_IsFlushSupported_ResponseParams_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_IsFlushSupported_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
bool p_result{};
VideoEncodeAccelerator_IsFlushSupported_ResponseParamsDataView input_data_view(params, message);
if (success)
p_result = input_data_view.result();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 5, true);
return false;
}
if (!callback_.is_null())
std::move(callback_).Run(
std::move(p_result));
return true;
}
void VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder::Run(
bool in_result) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send reply media::mojom::VideoEncodeAccelerator::IsFlushSupported", "async_response_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("result"), in_result,
"<value of type bool>");
});
#endif
const uint32_t kFlags = mojo::Message::kFlagIsResponse |
((is_sync_) ? mojo::Message::kFlagIsSync : 0) |
((true) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_IsFlushSupported_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_IsFlushSupported_ResponseParams_Data> params(
message);
params.Allocate();
params->result = in_result;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("IsFlushSupported");
#endif
message.set_request_id(request_id_);
message.set_trace_nonce(trace_nonce_);
::mojo::internal::SendMessage(*responder_, message);
// SendMessage fails silently if the responder connection is closed,
// or if the message is malformed.
//
// TODO(darin): If Accept() returns false due to a malformed message, that
// may be good reason to close the connection. However, we don't have a
// way to do that from here. We should add a way.
responder_ = nullptr;
}
bool VideoEncodeAccelerator_IsFlushSupported_HandleSyncResponse::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_IsFlushSupported_ResponseParams_Data* params =
reinterpret_cast<internal::VideoEncodeAccelerator_IsFlushSupported_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
bool p_result{};
VideoEncodeAccelerator_IsFlushSupported_ResponseParamsDataView input_data_view(params, message);
if (success)
p_result = input_data_view.result();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 5, true);
return false;
}
*out_result_ = std::move(p_result);
*result_ = true;
return true;
}
class VideoEncodeAccelerator_Flush_ProxyToResponder : public ::mojo::internal::ProxyToResponder {
public:
static VideoEncodeAccelerator::FlushCallback CreateCallback(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
std::unique_ptr<VideoEncodeAccelerator_Flush_ProxyToResponder> proxy(
new VideoEncodeAccelerator_Flush_ProxyToResponder(
message, std::move(responder)));
return base::BindOnce(&VideoEncodeAccelerator_Flush_ProxyToResponder::Run,
std::move(proxy));
}
~VideoEncodeAccelerator_Flush_ProxyToResponder() {
#if DCHECK_IS_ON()
if (responder_) {
// If we're being destroyed without being run, we want to ensure the
// binding endpoint has been closed. This checks for that asynchronously.
// We pass a bound generated callback to handle the response so that any
// resulting DCHECK stack will have useful interface type information.
responder_->IsConnectedAsync(base::BindOnce(&OnIsConnectedComplete));
}
#endif
}
private:
VideoEncodeAccelerator_Flush_ProxyToResponder(
::mojo::Message& message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder)
: ::mojo::internal::ProxyToResponder(message, std::move(responder)) {
}
#if DCHECK_IS_ON()
static void OnIsConnectedComplete(bool connected) {
DCHECK(!connected)
<< "VideoEncodeAccelerator::FlushCallback was destroyed without "
<< "first either being run or its corresponding binding being closed. "
<< "It is an error to drop response callbacks which still correspond "
<< "to an open interface pipe.";
}
#endif
void Run(
bool in_result);
};
bool VideoEncodeAccelerator_Flush_ForwardToCallback::Accept(
mojo::Message* message) {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_Flush_ResponseParams_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_Flush_ResponseParams_Data*>(
message->mutable_payload());
bool success = true;
bool p_result{};
VideoEncodeAccelerator_Flush_ResponseParamsDataView input_data_view(params, message);
if (success)
p_result = input_data_view.result();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 6, true);
return false;
}
if (!callback_.is_null())
std::move(callback_).Run(
std::move(p_result));
return true;
}
void VideoEncodeAccelerator_Flush_ProxyToResponder::Run(
bool in_result) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send reply media::mojom::VideoEncodeAccelerator::Flush", "async_response_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("result"), in_result,
"<value of type bool>");
});
#endif
const uint32_t kFlags = mojo::Message::kFlagIsResponse |
((is_sync_) ? mojo::Message::kFlagIsSync : 0) |
((true) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAccelerator_Flush_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAccelerator_Flush_ResponseParams_Data> params(
message);
params.Allocate();
params->result = in_result;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAccelerator::Name_);
message.set_method_name("Flush");
#endif
message.set_request_id(request_id_);
message.set_trace_nonce(trace_nonce_);
::mojo::internal::SendMessage(*responder_, message);
// SendMessage fails silently if the responder connection is closed,
// or if the message is malformed.
//
// TODO(darin): If Accept() returns false due to a malformed message, that
// may be good reason to close the connection. However, we don't have a
// way to do that from here. We should add a way.
responder_ = nullptr;
}
// static
bool VideoEncodeAcceleratorStubDispatch::Accept(
VideoEncodeAccelerator* impl,
mojo::Message* message) {
switch (message->header()->name) {
case internal::kVideoEncodeAccelerator_Initialize_Name: {
break;
}
case internal::kVideoEncodeAccelerator_Encode_Name: {
break;
}
case internal::kVideoEncodeAccelerator_UseOutputBitstreamBuffer_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_UseOutputBitstreamBuffer_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAccelerator_UseOutputBitstreamBuffer_Params_Data*>(
message->mutable_payload());
bool success = true;
int32_t p_bitstream_buffer_id{};
::base::UnsafeSharedMemoryRegion p_region{};
VideoEncodeAccelerator_UseOutputBitstreamBuffer_ParamsDataView input_data_view(params, message);
if (success)
p_bitstream_buffer_id = input_data_view.bitstream_buffer_id();
if (success && !input_data_view.ReadRegion(&p_region))
success = false;
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 2, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->UseOutputBitstreamBuffer(
std::move(p_bitstream_buffer_id),
std::move(p_region));
return true;
}
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Params_Data*>(
message->mutable_payload());
bool success = true;
VideoBitrateAllocationPtr p_bitrate_allocation{};
uint32_t p_framerate{};
VideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_ParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadBitrateAllocation(&p_bitrate_allocation))
success = false;
if (success)
p_framerate = input_data_view.framerate();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 3, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->RequestEncodingParametersChangeWithLayers(
std::move(p_bitrate_allocation),
std::move(p_framerate));
return true;
}
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Params_Data*>(
message->mutable_payload());
bool success = true;
BitratePtr p_bitrate{};
uint32_t p_framerate{};
VideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_ParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadBitrate(&p_bitrate))
success = false;
if (success)
p_framerate = input_data_view.framerate();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 4, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->RequestEncodingParametersChangeWithBitrate(
std::move(p_bitrate),
std::move(p_framerate));
return true;
}
case internal::kVideoEncodeAccelerator_IsFlushSupported_Name: {
break;
}
case internal::kVideoEncodeAccelerator_Flush_Name: {
break;
}
}
return false;
}
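// Note (editorial): Accept() above only handles the messages that expect no
// reply (UseOutputBitstreamBuffer and the two parameter-change requests).
// Request/response messages are delivered through AcceptWithResponder()
// below instead, so their cases simply break and this function returns false.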
// static
bool VideoEncodeAcceleratorStubDispatch::AcceptWithResponder(
VideoEncodeAccelerator* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
[[maybe_unused]] const bool message_is_sync =
message->has_flag(mojo::Message::kFlagIsSync);
[[maybe_unused]] const uint64_t request_id = message->request_id();
switch (message->header()->name) {
case internal::kVideoEncodeAccelerator_Initialize_Name: {
internal::VideoEncodeAccelerator_Initialize_Params_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_Initialize_Params_Data*>(
message->mutable_payload());
bool success = true;
VideoEncodeAcceleratorConfigPtr p_config{};
::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> p_client{};
::mojo::PendingRemote<::media::mojom::blink::MediaLog> p_media_log{};
VideoEncodeAccelerator_Initialize_ParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadConfig(&p_config))
success = false;
if (success) {
p_client =
input_data_view.TakeClient<decltype(p_client)>();
}
if (success) {
p_media_log =
input_data_view.TakeMediaLog<decltype(p_media_log)>();
}
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 0, false);
return false;
}
VideoEncodeAccelerator::InitializeCallback callback =
VideoEncodeAccelerator_Initialize_ProxyToResponder::CreateCallback(
*message, std::move(responder));
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->Initialize(
std::move(p_config),
std::move(p_client),
std::move(p_media_log), std::move(callback));
return true;
}
case internal::kVideoEncodeAccelerator_Encode_Name: {
internal::VideoEncodeAccelerator_Encode_Params_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_Encode_Params_Data*>(
message->mutable_payload());
bool success = true;
::media::mojom::blink::VideoFramePtr p_frame{};
bool p_force_keyframe{};
VideoEncodeAccelerator_Encode_ParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadFrame(&p_frame))
success = false;
if (success)
p_force_keyframe = input_data_view.force_keyframe();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 1, false);
return false;
}
VideoEncodeAccelerator::EncodeCallback callback =
VideoEncodeAccelerator_Encode_ProxyToResponder::CreateCallback(
*message, std::move(responder));
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->Encode(
std::move(p_frame),
std::move(p_force_keyframe), std::move(callback));
return true;
}
case internal::kVideoEncodeAccelerator_UseOutputBitstreamBuffer_Name: {
break;
}
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Name: {
break;
}
case internal::kVideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Name: {
break;
}
case internal::kVideoEncodeAccelerator_IsFlushSupported_Name: {
internal::VideoEncodeAccelerator_IsFlushSupported_Params_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_IsFlushSupported_Params_Data*>(
message->mutable_payload());
bool success = true;
VideoEncodeAccelerator_IsFlushSupported_ParamsDataView input_data_view(params, message);
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 5, false);
return false;
}
VideoEncodeAccelerator::IsFlushSupportedCallback callback =
VideoEncodeAccelerator_IsFlushSupported_ProxyToResponder::CreateCallback(
*message, std::move(responder));
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->IsFlushSupported(std::move(callback));
return true;
}
case internal::kVideoEncodeAccelerator_Flush_Name: {
internal::VideoEncodeAccelerator_Flush_Params_Data* params =
reinterpret_cast<
internal::VideoEncodeAccelerator_Flush_Params_Data*>(
message->mutable_payload());
bool success = true;
VideoEncodeAccelerator_Flush_ParamsDataView input_data_view(params, message);
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAccelerator::Name_, 6, false);
return false;
}
VideoEncodeAccelerator::FlushCallback callback =
VideoEncodeAccelerator_Flush_ProxyToResponder::CreateCallback(
*message, std::move(responder));
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->Flush(std::move(callback));
return true;
}
}
return false;
}
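// Receiver-side sketch (editorial, assumed): these dispatch hooks are
// normally driven by a mojo::Receiver bound to a concrete implementation, for
// example:
//
//   class MyEncoder : public media::mojom::blink::VideoEncodeAccelerator {
//     ...
//   };
//
//   MyEncoder impl;
//   mojo::Receiver<media::mojom::blink::VideoEncodeAccelerator> receiver(&impl);
//   receiver.Bind(std::move(pending_receiver));
//
// MyEncoder and |pending_receiver| are illustrative names; the latter stands
// for a PendingReceiver obtained from the remote end.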
static const mojo::internal::GenericValidationInfo kVideoEncodeAcceleratorValidationInfo[] = {
{&internal::VideoEncodeAccelerator_Initialize_Params_Data::Validate,
&internal::VideoEncodeAccelerator_Initialize_ResponseParams_Data::Validate},
{&internal::VideoEncodeAccelerator_Encode_Params_Data::Validate,
&internal::VideoEncodeAccelerator_Encode_ResponseParams_Data::Validate},
{&internal::VideoEncodeAccelerator_UseOutputBitstreamBuffer_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithLayers_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAccelerator_RequestEncodingParametersChangeWithBitrate_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAccelerator_IsFlushSupported_Params_Data::Validate,
&internal::VideoEncodeAccelerator_IsFlushSupported_ResponseParams_Data::Validate},
{&internal::VideoEncodeAccelerator_Flush_Params_Data::Validate,
&internal::VideoEncodeAccelerator_Flush_ResponseParams_Data::Validate},
};
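// Note (editorial): each entry above pairs a request validator with the
// corresponding response validator (nullptr when the method has no reply),
// packed in method-ordinal order so the generic request/response validators
// below can look up the validators for an incoming message by its ordinal.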
bool VideoEncodeAcceleratorRequestValidator::Accept(mojo::Message* message) {
const char* name = ::media::mojom::blink::VideoEncodeAccelerator::Name_;
return mojo::internal::ValidateRequestGenericPacked(message, name, kVideoEncodeAcceleratorValidationInfo);
}
bool VideoEncodeAcceleratorResponseValidator::Accept(mojo::Message* message) {
const char* name = ::media::mojom::blink::VideoEncodeAccelerator::Name_;
return mojo::internal::ValidateResponseGenericPacked(message, name, kVideoEncodeAcceleratorValidationInfo);
}
const char VideoEncodeAcceleratorClient::Name_[] = "media.mojom.VideoEncodeAcceleratorClient";
std::pair<uint32_t, const void*> VideoEncodeAcceleratorClient::MessageToMethodInfo_(mojo::Message& message) {
switch (message.name()) {
case internal::kVideoEncodeAcceleratorClient_RequireBitstreamBuffers_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAcceleratorClient::RequireBitstreamBuffers");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAcceleratorClient::RequireBitstreamBuffers_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAcceleratorClient_BitstreamBufferReady_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAcceleratorClient::BitstreamBufferReady");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAcceleratorClient::BitstreamBufferReady_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAcceleratorClient_NotifyError_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAcceleratorClient::NotifyError");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAcceleratorClient::NotifyError_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
case internal::kVideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Name: {
constexpr uint32_t value = base::MD5Hash32Constexpr(
"(Impl)media::mojom::VideoEncodeAcceleratorClient::NotifyEncoderInfoChange");
#if BUILDFLAG(IS_FUCHSIA)
return std::make_pair(value, nullptr);
#else
return std::make_pair(value, reinterpret_cast<const void*>(&VideoEncodeAcceleratorClient::NotifyEncoderInfoChange_Sym::IPCSymbol));
#endif // BUILDFLAG(IS_FUCHSIA)
}
}
return std::make_pair(0, nullptr);
}
const char* VideoEncodeAcceleratorClient::MessageToMethodName_(mojo::Message& message) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
bool is_response = message.has_flag(mojo::Message::kFlagIsResponse);
if (!is_response) {
switch (message.name()) {
case internal::kVideoEncodeAcceleratorClient_RequireBitstreamBuffers_Name:
return "Receive media::mojom::VideoEncodeAcceleratorClient::RequireBitstreamBuffers";
case internal::kVideoEncodeAcceleratorClient_BitstreamBufferReady_Name:
return "Receive media::mojom::VideoEncodeAcceleratorClient::BitstreamBufferReady";
case internal::kVideoEncodeAcceleratorClient_NotifyError_Name:
return "Receive media::mojom::VideoEncodeAcceleratorClient::NotifyError";
case internal::kVideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Name:
return "Receive media::mojom::VideoEncodeAcceleratorClient::NotifyEncoderInfoChange";
}
} else {
switch (message.name()) {
case internal::kVideoEncodeAcceleratorClient_RequireBitstreamBuffers_Name:
return "Receive reply media::mojom::VideoEncodeAcceleratorClient::RequireBitstreamBuffers";
case internal::kVideoEncodeAcceleratorClient_BitstreamBufferReady_Name:
return "Receive reply media::mojom::VideoEncodeAcceleratorClient::BitstreamBufferReady";
case internal::kVideoEncodeAcceleratorClient_NotifyError_Name:
return "Receive reply media::mojom::VideoEncodeAcceleratorClient::NotifyError";
case internal::kVideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Name:
return "Receive reply media::mojom::VideoEncodeAcceleratorClient::NotifyEncoderInfoChange";
}
}
return "Receive unknown mojo message";
#else
bool is_response = message.has_flag(mojo::Message::kFlagIsResponse);
if (is_response) {
return "Receive mojo reply";
} else {
return "Receive mojo message";
}
#endif // BUILDFLAG(MOJO_TRACE_ENABLED)
}
#if !BUILDFLAG(IS_FUCHSIA)
void VideoEncodeAcceleratorClient::RequireBitstreamBuffers_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAcceleratorClient::BitstreamBufferReady_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAcceleratorClient::NotifyError_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
void VideoEncodeAcceleratorClient::NotifyEncoderInfoChange_Sym::IPCSymbol() {
// This method's address is used for identifying the mojo method name after
// symbolization. So each IPCSymbol should have a unique address.
NO_CODE_FOLDING();
}
#endif // !BUILDFLAG(IS_FUCHSIA)
VideoEncodeAcceleratorClientProxy::VideoEncodeAcceleratorClientProxy(mojo::MessageReceiverWithResponder* receiver)
: receiver_(receiver) {
}
void VideoEncodeAcceleratorClientProxy::RequireBitstreamBuffers(
uint32_t in_input_count, const ::gfx::Size& in_input_coded_size, uint32_t in_output_buffer_size) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAcceleratorClient::RequireBitstreamBuffers", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("input_count"), in_input_count,
"<value of type uint32_t>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("input_coded_size"), in_input_coded_size,
"<value of type const ::gfx::Size&>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("output_buffer_size"), in_output_buffer_size,
"<value of type uint32_t>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorClient_RequireBitstreamBuffers_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorClient_RequireBitstreamBuffers_Params_Data> params(
message);
params.Allocate();
params->input_count = in_input_count;
mojo::internal::MessageFragment<
typename decltype(params->input_coded_size)::BaseType> input_coded_size_fragment(
params.message());
mojo::internal::Serialize<::gfx::mojom::SizeDataView>(
in_input_coded_size, input_coded_size_fragment);
params->input_coded_size.Set(
input_coded_size_fragment.is_null() ? nullptr : input_coded_size_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->input_coded_size.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null input_coded_size in VideoEncodeAcceleratorClient.RequireBitstreamBuffers request");
params->output_buffer_size = in_output_buffer_size;
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorClient::Name_);
message.set_method_name("RequireBitstreamBuffers");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
void VideoEncodeAcceleratorClientProxy::BitstreamBufferReady(
int32_t in_bitstream_buffer_id, BitstreamBufferMetadataPtr in_metadata) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAcceleratorClient::BitstreamBufferReady", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("bitstream_buffer_id"), in_bitstream_buffer_id,
"<value of type int32_t>");
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("metadata"), in_metadata,
"<value of type BitstreamBufferMetadataPtr>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorClient_BitstreamBufferReady_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorClient_BitstreamBufferReady_Params_Data> params(
message);
params.Allocate();
params->bitstream_buffer_id = in_bitstream_buffer_id;
mojo::internal::MessageFragment<
typename decltype(params->metadata)::BaseType> metadata_fragment(
params.message());
mojo::internal::Serialize<::media::mojom::BitstreamBufferMetadataDataView>(
in_metadata, metadata_fragment);
params->metadata.Set(
metadata_fragment.is_null() ? nullptr : metadata_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->metadata.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null metadata in VideoEncodeAcceleratorClient.BitstreamBufferReady request");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorClient::Name_);
message.set_method_name("BitstreamBufferReady");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
void VideoEncodeAcceleratorClientProxy::NotifyError(
VideoEncodeAccelerator::Error in_error) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAcceleratorClient::NotifyError", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("error"), in_error,
"<value of type VideoEncodeAccelerator::Error>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorClient_NotifyError_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorClient_NotifyError_Params_Data> params(
message);
params.Allocate();
mojo::internal::Serialize<::media::mojom::VideoEncodeAccelerator_Error>(
in_error, &params->error);
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorClient::Name_);
message.set_method_name("NotifyError");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
void VideoEncodeAcceleratorClientProxy::NotifyEncoderInfoChange(
::media::mojom::blink::VideoEncoderInfoPtr in_info) {
#if BUILDFLAG(MOJO_TRACE_ENABLED)
TRACE_EVENT1(
"mojom", "Send media::mojom::VideoEncodeAcceleratorClient::NotifyEncoderInfoChange", "input_parameters",
[&](perfetto::TracedValue context){
auto dict = std::move(context).WriteDictionary();
perfetto::WriteIntoTracedValueWithFallback(
dict.AddItem("info"), in_info,
"<value of type ::media::mojom::blink::VideoEncoderInfoPtr>");
});
#endif
const bool kExpectsResponse = false;
const bool kIsSync = false;
const bool kAllowInterrupt = true;
const uint32_t kFlags =
((kExpectsResponse) ? mojo::Message::kFlagExpectsResponse : 0) |
((kIsSync) ? mojo::Message::kFlagIsSync : 0) |
((kAllowInterrupt) ? 0 : mojo::Message::kFlagNoInterrupt);
mojo::Message message(
internal::kVideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Name, kFlags, 0, 0, nullptr);
mojo::internal::MessageFragment<
::media::mojom::internal::VideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Params_Data> params(
message);
params.Allocate();
mojo::internal::MessageFragment<
typename decltype(params->info)::BaseType> info_fragment(
params.message());
mojo::internal::Serialize<::media::mojom::VideoEncoderInfoDataView>(
in_info, info_fragment);
params->info.Set(
info_fragment.is_null() ? nullptr : info_fragment.data());
MOJO_INTERNAL_DLOG_SERIALIZATION_WARNING(
params->info.is_null(),
mojo::internal::VALIDATION_ERROR_UNEXPECTED_NULL_POINTER,
"null info in VideoEncodeAcceleratorClient.NotifyEncoderInfoChange request");
#if defined(ENABLE_IPC_FUZZER)
message.set_interface_name(VideoEncodeAcceleratorClient::Name_);
message.set_method_name("NotifyEncoderInfoChange");
#endif
// This return value may be ignored as false implies the Connector has
// encountered an error, which will be visible through other means.
::mojo::internal::SendMessage(*receiver_, message);
}
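// The four proxy methods above are not called directly; they back a remote
// endpoint. As a minimal sketch (|client|, |id| and |metadata| are
// hypothetical names, not part of this file), with a bound
// mojo::AssociatedRemote<VideoEncodeAcceleratorClient> |client| a call such as
//   client->BitstreamBufferReady(id, std::move(metadata));
// routes through VideoEncodeAcceleratorClientProxy::BitstreamBufferReady,
// which serializes the arguments into a mojo::Message and hands it to the
// Connector via SendMessage().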
// static
bool VideoEncodeAcceleratorClientStubDispatch::Accept(
VideoEncodeAcceleratorClient* impl,
mojo::Message* message) {
switch (message->header()->name) {
case internal::kVideoEncodeAcceleratorClient_RequireBitstreamBuffers_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAcceleratorClient_RequireBitstreamBuffers_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAcceleratorClient_RequireBitstreamBuffers_Params_Data*>(
message->mutable_payload());
bool success = true;
uint32_t p_input_count{};
::gfx::Size p_input_coded_size{};
uint32_t p_output_buffer_size{};
VideoEncodeAcceleratorClient_RequireBitstreamBuffers_ParamsDataView input_data_view(params, message);
if (success)
p_input_count = input_data_view.input_count();
if (success && !input_data_view.ReadInputCodedSize(&p_input_coded_size))
success = false;
if (success)
p_output_buffer_size = input_data_view.output_buffer_size();
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorClient::Name_, 0, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->RequireBitstreamBuffers(
std::move(p_input_count),
std::move(p_input_coded_size),
std::move(p_output_buffer_size));
return true;
}
case internal::kVideoEncodeAcceleratorClient_BitstreamBufferReady_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAcceleratorClient_BitstreamBufferReady_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAcceleratorClient_BitstreamBufferReady_Params_Data*>(
message->mutable_payload());
bool success = true;
int32_t p_bitstream_buffer_id{};
BitstreamBufferMetadataPtr p_metadata{};
VideoEncodeAcceleratorClient_BitstreamBufferReady_ParamsDataView input_data_view(params, message);
if (success)
p_bitstream_buffer_id = input_data_view.bitstream_buffer_id();
if (success && !input_data_view.ReadMetadata(&p_metadata))
success = false;
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorClient::Name_, 1, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->BitstreamBufferReady(
std::move(p_bitstream_buffer_id),
std::move(p_metadata));
return true;
}
case internal::kVideoEncodeAcceleratorClient_NotifyError_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAcceleratorClient_NotifyError_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAcceleratorClient_NotifyError_Params_Data*>(
message->mutable_payload());
bool success = true;
VideoEncodeAccelerator::Error p_error{};
VideoEncodeAcceleratorClient_NotifyError_ParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadError(&p_error))
success = false;
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorClient::Name_, 2, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->NotifyError(
std::move(p_error));
return true;
}
case internal::kVideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Name: {
DCHECK(message->is_serialized());
internal::VideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Params_Data* params =
reinterpret_cast<internal::VideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Params_Data*>(
message->mutable_payload());
bool success = true;
::media::mojom::blink::VideoEncoderInfoPtr p_info{};
VideoEncodeAcceleratorClient_NotifyEncoderInfoChange_ParamsDataView input_data_view(params, message);
if (success && !input_data_view.ReadInfo(&p_info))
success = false;
if (!success) {
ReportValidationErrorForMessage(
message,
mojo::internal::VALIDATION_ERROR_DESERIALIZATION_FAILED,
VideoEncodeAcceleratorClient::Name_, 3, false);
return false;
}
// A null |impl| means no implementation was bound.
DCHECK(impl);
impl->NotifyEncoderInfoChange(
std::move(p_info));
return true;
}
}
return false;
}
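// Accept() above runs for every message delivered to a bound receiver. As a
// rough sketch (an implementation object |impl| and a pending associated
// receiver |pending|, both hypothetical), the implementation is typically
// attached with
//   mojo::AssociatedReceiver<VideoEncodeAcceleratorClient> receiver{&impl};
//   receiver.Bind(std::move(pending));
// after which each incoming call is deserialized here and forwarded to the
// matching VideoEncodeAcceleratorClient virtual on |impl|.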
// static
bool VideoEncodeAcceleratorClientStubDispatch::AcceptWithResponder(
VideoEncodeAcceleratorClient* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {
[[maybe_unused]] const bool message_is_sync =
message->has_flag(mojo::Message::kFlagIsSync);
[[maybe_unused]] const uint64_t request_id = message->request_id();
switch (message->header()->name) {
case internal::kVideoEncodeAcceleratorClient_RequireBitstreamBuffers_Name: {
break;
}
case internal::kVideoEncodeAcceleratorClient_BitstreamBufferReady_Name: {
break;
}
case internal::kVideoEncodeAcceleratorClient_NotifyError_Name: {
break;
}
case internal::kVideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Name: {
break;
}
}
return false;
}
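// None of the VideoEncodeAcceleratorClient methods expects a reply, so a
// message arriving with a responder is unexpected and is rejected by
// returning false above.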
static const mojo::internal::GenericValidationInfo kVideoEncodeAcceleratorClientValidationInfo[] = {
{&internal::VideoEncodeAcceleratorClient_RequireBitstreamBuffers_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAcceleratorClient_BitstreamBufferReady_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAcceleratorClient_NotifyError_Params_Data::Validate,
nullptr /* no response */},
{&internal::VideoEncodeAcceleratorClient_NotifyEncoderInfoChange_Params_Data::Validate,
nullptr /* no response */},
};
bool VideoEncodeAcceleratorClientRequestValidator::Accept(mojo::Message* message) {
const char* name = ::media::mojom::blink::VideoEncodeAcceleratorClient::Name_;
return mojo::internal::ValidateRequestGenericPacked(message, name, kVideoEncodeAcceleratorClientValidationInfo);
}
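// The validation table above is indexed by message ordinal (0 through 3, the
// same ordinals reported by the dispatch code above); every response entry is
// null because none of these messages carries a reply.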
} // namespace blink
} // namespace mojom
} // namespace media
namespace mojo {
// static
bool StructTraits<::media::mojom::blink::VideoEncodeAcceleratorSupportedProfile::DataView, ::media::mojom::blink::VideoEncodeAcceleratorSupportedProfilePtr>::Read(
::media::mojom::blink::VideoEncodeAcceleratorSupportedProfile::DataView input,
::media::mojom::blink::VideoEncodeAcceleratorSupportedProfilePtr* output) {
bool success = true;
::media::mojom::blink::VideoEncodeAcceleratorSupportedProfilePtr result(::media::mojom::blink::VideoEncodeAcceleratorSupportedProfile::New());
if (success && !input.ReadProfile(&result->profile))
success = false;
if (success && !input.ReadMinResolution(&result->min_resolution))
success = false;
if (success && !input.ReadMaxResolution(&result->max_resolution))
success = false;
if (success)
result->max_framerate_numerator = input.max_framerate_numerator();
if (success)
result->max_framerate_denominator = input.max_framerate_denominator();
if (success && !input.ReadRateControlModes(&result->rate_control_modes))
success = false;
if (success && !input.ReadScalabilityModes(&result->scalability_modes))
success = false;
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::VariableBitratePeak::DataView, ::media::mojom::blink::VariableBitratePeakPtr>::Read(
::media::mojom::blink::VariableBitratePeak::DataView input,
::media::mojom::blink::VariableBitratePeakPtr* output) {
bool success = true;
::media::mojom::blink::VariableBitratePeakPtr result(::media::mojom::blink::VariableBitratePeak::New());
if (success)
result->bps = input.bps();
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::VideoBitrateAllocation::DataView, ::media::mojom::blink::VideoBitrateAllocationPtr>::Read(
::media::mojom::blink::VideoBitrateAllocation::DataView input,
::media::mojom::blink::VideoBitrateAllocationPtr* output) {
bool success = true;
::media::mojom::blink::VideoBitrateAllocationPtr result(::media::mojom::blink::VideoBitrateAllocation::New());
if (success && !input.ReadBitrates(&result->bitrates))
success = false;
if (success && !input.ReadVariableBitratePeak(&result->variable_bitrate_peak))
success = false;
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::SpatialLayer::DataView, ::media::mojom::blink::SpatialLayerPtr>::Read(
::media::mojom::blink::SpatialLayer::DataView input,
::media::mojom::blink::SpatialLayerPtr* output) {
bool success = true;
::media::mojom::blink::SpatialLayerPtr result(::media::mojom::blink::SpatialLayer::New());
if (success)
result->width = input.width();
if (success)
result->height = input.height();
if (success)
result->bitrate_bps = input.bitrate_bps();
if (success)
result->framerate = input.framerate();
if (success)
result->max_qp = input.max_qp();
if (success)
result->num_of_temporal_layers = input.num_of_temporal_layers();
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::ConstantBitrate::DataView, ::media::mojom::blink::ConstantBitratePtr>::Read(
::media::mojom::blink::ConstantBitrate::DataView input,
::media::mojom::blink::ConstantBitratePtr* output) {
bool success = true;
::media::mojom::blink::ConstantBitratePtr result(::media::mojom::blink::ConstantBitrate::New());
if (success)
result->target_bps = input.target_bps();
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::VariableBitrate::DataView, ::media::mojom::blink::VariableBitratePtr>::Read(
::media::mojom::blink::VariableBitrate::DataView input,
::media::mojom::blink::VariableBitratePtr* output) {
bool success = true;
::media::mojom::blink::VariableBitratePtr result(::media::mojom::blink::VariableBitrate::New());
if (success)
result->target_bps = input.target_bps();
if (success)
result->peak_bps = input.peak_bps();
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::VideoEncodeAcceleratorConfig::DataView, ::media::mojom::blink::VideoEncodeAcceleratorConfigPtr>::Read(
::media::mojom::blink::VideoEncodeAcceleratorConfig::DataView input,
::media::mojom::blink::VideoEncodeAcceleratorConfigPtr* output) {
bool success = true;
::media::mojom::blink::VideoEncodeAcceleratorConfigPtr result(::media::mojom::blink::VideoEncodeAcceleratorConfig::New());
if (success && !input.ReadInputFormat(&result->input_format))
success = false;
if (success && !input.ReadInputVisibleSize(&result->input_visible_size))
success = false;
if (success && !input.ReadOutputProfile(&result->output_profile))
success = false;
if (success && !input.ReadBitrate(&result->bitrate))
success = false;
if (success)
result->initial_framerate = input.initial_framerate();
if (success)
result->has_initial_framerate = input.has_initial_framerate();
if (success)
result->gop_length = input.gop_length();
if (success)
result->has_gop_length = input.has_gop_length();
if (success)
result->h264_output_level = input.h264_output_level();
if (success)
result->has_h264_output_level = input.has_h264_output_level();
if (success)
result->is_constrained_h264 = input.is_constrained_h264();
if (success && !input.ReadStorageType(&result->storage_type))
success = false;
if (success)
result->has_storage_type = input.has_storage_type();
if (success && !input.ReadContentType(&result->content_type))
success = false;
if (success && !input.ReadSpatialLayers(&result->spatial_layers))
success = false;
if (success && !input.ReadInterLayerPred(&result->inter_layer_pred))
success = false;
if (success)
result->require_low_delay = input.require_low_delay();
*output = std::move(result);
return success;
}
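// Optional primitive fields in VideoEncodeAcceleratorConfig travel as
// value/flag pairs (initial_framerate/has_initial_framerate,
// gop_length/has_gop_length, h264_output_level/has_h264_output_level,
// storage_type/has_storage_type); the value is only meaningful when the
// corresponding has_* flag is true.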
// static
bool StructTraits<::media::mojom::blink::H264Metadata::DataView, ::media::mojom::blink::H264MetadataPtr>::Read(
::media::mojom::blink::H264Metadata::DataView input,
::media::mojom::blink::H264MetadataPtr* output) {
bool success = true;
::media::mojom::blink::H264MetadataPtr result(::media::mojom::blink::H264Metadata::New());
if (success)
result->temporal_idx = input.temporal_idx();
if (success)
result->layer_sync = input.layer_sync();
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::Vp8Metadata::DataView, ::media::mojom::blink::Vp8MetadataPtr>::Read(
::media::mojom::blink::Vp8Metadata::DataView input,
::media::mojom::blink::Vp8MetadataPtr* output) {
bool success = true;
::media::mojom::blink::Vp8MetadataPtr result(::media::mojom::blink::Vp8Metadata::New());
if (success)
result->non_reference = input.non_reference();
if (success)
result->temporal_idx = input.temporal_idx();
if (success)
result->layer_sync = input.layer_sync();
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::Vp9Metadata::DataView, ::media::mojom::blink::Vp9MetadataPtr>::Read(
::media::mojom::blink::Vp9Metadata::DataView input,
::media::mojom::blink::Vp9MetadataPtr* output) {
bool success = true;
::media::mojom::blink::Vp9MetadataPtr result(::media::mojom::blink::Vp9Metadata::New());
if (success)
result->inter_pic_predicted = input.inter_pic_predicted();
if (success)
result->temporal_up_switch = input.temporal_up_switch();
if (success)
result->referenced_by_upper_spatial_layers = input.referenced_by_upper_spatial_layers();
if (success)
result->reference_lower_spatial_layers = input.reference_lower_spatial_layers();
if (success)
result->end_of_picture = input.end_of_picture();
if (success)
result->temporal_idx = input.temporal_idx();
if (success)
result->spatial_idx = input.spatial_idx();
if (success && !input.ReadSpatialLayerResolutions(&result->spatial_layer_resolutions))
success = false;
if (success && !input.ReadPDiffs(&result->p_diffs))
success = false;
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::Av1Metadata::DataView, ::media::mojom::blink::Av1MetadataPtr>::Read(
::media::mojom::blink::Av1Metadata::DataView input,
::media::mojom::blink::Av1MetadataPtr* output) {
bool success = true;
::media::mojom::blink::Av1MetadataPtr result(::media::mojom::blink::Av1Metadata::New());
if (success)
result->inter_pic_predicted = input.inter_pic_predicted();
if (success)
result->switch_frame = input.switch_frame();
if (success)
result->end_of_picture = input.end_of_picture();
if (success)
result->temporal_idx = input.temporal_idx();
if (success)
result->spatial_idx = input.spatial_idx();
if (success && !input.ReadSpatialLayerResolutions(&result->spatial_layer_resolutions))
success = false;
if (success && !input.ReadFDiffs(&result->f_diffs))
success = false;
*output = std::move(result);
return success;
}
// static
bool StructTraits<::media::mojom::blink::BitstreamBufferMetadata::DataView, ::media::mojom::blink::BitstreamBufferMetadataPtr>::Read(
::media::mojom::blink::BitstreamBufferMetadata::DataView input,
::media::mojom::blink::BitstreamBufferMetadataPtr* output) {
bool success = true;
::media::mojom::blink::BitstreamBufferMetadataPtr result(::media::mojom::blink::BitstreamBufferMetadata::New());
if (success)
result->payload_size_bytes = input.payload_size_bytes();
if (success)
result->key_frame = input.key_frame();
if (success && !input.ReadTimestamp(&result->timestamp))
success = false;
if (success)
result->qp = input.qp();
if (success && !input.ReadCodecMetadata(&result->codec_metadata))
success = false;
*output = std::move(result);
return success;
}
// static
bool UnionTraits<::media::mojom::blink::Bitrate::DataView, ::media::mojom::blink::BitratePtr>::Read(
::media::mojom::blink::Bitrate::DataView input,
::media::mojom::blink::BitratePtr* output) {
using UnionType = ::media::mojom::blink::Bitrate;
using Tag = UnionType::Tag;
switch (input.tag()) {
case Tag::kConstant: {
::media::mojom::blink::ConstantBitratePtr result_constant;
if (!input.ReadConstant(&result_constant))
return false;
*output = UnionType::NewConstant(
std::move(result_constant));
break;
}
case Tag::kVariable: {
::media::mojom::blink::VariableBitratePtr result_variable;
if (!input.ReadVariable(&result_variable))
return false;
*output = UnionType::NewVariable(
std::move(result_variable));
break;
}
default:
return false;
}
return true;
}
// static
bool UnionTraits<::media::mojom::blink::CodecMetadata::DataView, ::media::mojom::blink::CodecMetadataPtr>::Read(
::media::mojom::blink::CodecMetadata::DataView input,
::media::mojom::blink::CodecMetadataPtr* output) {
using UnionType = ::media::mojom::blink::CodecMetadata;
using Tag = UnionType::Tag;
switch (input.tag()) {
case Tag::kH264: {
::media::mojom::blink::H264MetadataPtr result_h264;
if (!input.ReadH264(&result_h264))
return false;
*output = UnionType::NewH264(
std::move(result_h264));
break;
}
case Tag::kVp8: {
::media::mojom::blink::Vp8MetadataPtr result_vp8;
if (!input.ReadVp8(&result_vp8))
return false;
*output = UnionType::NewVp8(
std::move(result_vp8));
break;
}
case Tag::kVp9: {
::media::mojom::blink::Vp9MetadataPtr result_vp9;
if (!input.ReadVp9(&result_vp9))
return false;
*output = UnionType::NewVp9(
std::move(result_vp9));
break;
}
case Tag::kAv1: {
::media::mojom::blink::Av1MetadataPtr result_av1;
if (!input.ReadAv1(&result_av1))
return false;
*output = UnionType::NewAv1(
std::move(result_av1));
break;
}
default:
return false;
}
return true;
}
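// On the sending side these unions are built with the generated factory
// helpers used in the cases above. A minimal sketch (the literal bitrate is
// an arbitrary example value):
//   auto bitrate = ::media::mojom::blink::Bitrate::NewConstant(
//       ::media::mojom::blink::ConstantBitrate::New(300000u));
// CodecMetadata is constructed the same way via NewH264(), NewVp8(), NewVp9()
// or NewAv1().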
} // namespace mojo
// Symbols declared in the -test-utils.h header are defined here instead of a
// separate .cc file to save compile time.
namespace media {
namespace mojom {
namespace blink {
void VideoEncodeAcceleratorProviderInterceptorForTesting::CreateVideoEncodeAccelerator(::mojo::PendingReceiver<VideoEncodeAccelerator> receiver) {
GetForwardingInterface()->CreateVideoEncodeAccelerator(std::move(receiver));
}
void VideoEncodeAcceleratorProviderInterceptorForTesting::GetVideoEncodeAcceleratorSupportedProfiles(GetVideoEncodeAcceleratorSupportedProfilesCallback callback) {
GetForwardingInterface()->GetVideoEncodeAcceleratorSupportedProfiles(std::move(callback));
}
VideoEncodeAcceleratorProviderAsyncWaiter::VideoEncodeAcceleratorProviderAsyncWaiter(
VideoEncodeAcceleratorProvider* proxy) : proxy_(proxy) {}
VideoEncodeAcceleratorProviderAsyncWaiter::~VideoEncodeAcceleratorProviderAsyncWaiter() = default;
void VideoEncodeAcceleratorProviderAsyncWaiter::GetVideoEncodeAcceleratorSupportedProfiles(
WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr>* out_profiles) {
base::RunLoop loop;
proxy_->GetVideoEncodeAcceleratorSupportedProfiles(
base::BindOnce(
[](base::RunLoop* loop,
WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr>* out_profiles,
WTF::Vector<VideoEncodeAcceleratorSupportedProfilePtr> profiles) {
*out_profiles = std::move(profiles);
loop->Quit();
},
&loop,
out_profiles));
loop.Run();
}
void VideoEncodeAcceleratorInterceptorForTesting::Initialize(VideoEncodeAcceleratorConfigPtr config, ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> client, ::mojo::PendingRemote<::media::mojom::blink::MediaLog> media_log, InitializeCallback callback) {
GetForwardingInterface()->Initialize(std::move(config), std::move(client), std::move(media_log), std::move(callback));
}
void VideoEncodeAcceleratorInterceptorForTesting::Encode(::media::mojom::blink::VideoFramePtr frame, bool force_keyframe, EncodeCallback callback) {
GetForwardingInterface()->Encode(std::move(frame), std::move(force_keyframe), std::move(callback));
}
void VideoEncodeAcceleratorInterceptorForTesting::UseOutputBitstreamBuffer(int32_t bitstream_buffer_id, ::base::UnsafeSharedMemoryRegion region) {
GetForwardingInterface()->UseOutputBitstreamBuffer(std::move(bitstream_buffer_id), std::move(region));
}
void VideoEncodeAcceleratorInterceptorForTesting::RequestEncodingParametersChangeWithLayers(VideoBitrateAllocationPtr bitrate_allocation, uint32_t framerate) {
GetForwardingInterface()->RequestEncodingParametersChangeWithLayers(std::move(bitrate_allocation), std::move(framerate));
}
void VideoEncodeAcceleratorInterceptorForTesting::RequestEncodingParametersChangeWithBitrate(BitratePtr bitrate, uint32_t framerate) {
GetForwardingInterface()->RequestEncodingParametersChangeWithBitrate(std::move(bitrate), std::move(framerate));
}
void VideoEncodeAcceleratorInterceptorForTesting::IsFlushSupported(IsFlushSupportedCallback callback) {
GetForwardingInterface()->IsFlushSupported(std::move(callback));
}
void VideoEncodeAcceleratorInterceptorForTesting::Flush(FlushCallback callback) {
GetForwardingInterface()->Flush(std::move(callback));
}
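// A test can subclass VideoEncodeAcceleratorInterceptorForTesting, implement
// GetForwardingInterface() to return the real implementation, and override
// only the methods it wants to observe; everything else is forwarded verbatim
// by the definitions above.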
VideoEncodeAcceleratorAsyncWaiter::VideoEncodeAcceleratorAsyncWaiter(
VideoEncodeAccelerator* proxy) : proxy_(proxy) {}
VideoEncodeAcceleratorAsyncWaiter::~VideoEncodeAcceleratorAsyncWaiter() = default;
void VideoEncodeAcceleratorAsyncWaiter::Initialize(
VideoEncodeAcceleratorConfigPtr config, ::mojo::PendingAssociatedRemote<VideoEncodeAcceleratorClient> client, ::mojo::PendingRemote<::media::mojom::blink::MediaLog> media_log, bool* out_result) {
base::RunLoop loop;
proxy_->Initialize(std::move(config), std::move(client), std::move(media_log),
base::BindOnce(
[](base::RunLoop* loop,
bool* out_result,
bool result) {
*out_result = std::move(result);
loop->Quit();
},
&loop,
out_result));
loop.Run();
}
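// Illustrative use of the waiter (|remote|, |config|, |client| and |media_log|
// are hypothetical, and the caller must be able to spin a nested
// base::RunLoop):
//   VideoEncodeAcceleratorAsyncWaiter waiter(remote.get());
//   bool initialized = false;
//   waiter.Initialize(std::move(config), std::move(client),
//                     std::move(media_log), &initialized);
// Initialize() blocks until the reply arrives and leaves the result in
// |initialized|.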
void VideoEncodeAcceleratorAsyncWaiter::Encode(
::media::mojom::blink::VideoFramePtr frame, bool force_keyframe) {
base::RunLoop loop;
proxy_->Encode(std::move(frame), std::move(force_keyframe),
base::BindOnce(
[](base::RunLoop* loop) {
loop->Quit();
},
&loop));
loop.Run();
}
void VideoEncodeAcceleratorAsyncWaiter::IsFlushSupported(
bool* out_result) {
base::RunLoop loop;
proxy_->IsFlushSupported(
base::BindOnce(
[](base::RunLoop* loop,
bool* out_result,
bool result) {
*out_result = std::move(result);
loop->Quit();
},
&loop,
out_result));
loop.Run();
}
void VideoEncodeAcceleratorAsyncWaiter::Flush(
bool* out_result) {
base::RunLoop loop;
proxy_->Flush(
base::BindOnce(
[](base::RunLoop* loop,
bool* out_result,
bool result) {
*out_result = std::move(result);
loop->Quit();
},
&loop,
out_result));
loop.Run();
}
void VideoEncodeAcceleratorClientInterceptorForTesting::RequireBitstreamBuffers(uint32_t input_count, const ::gfx::Size& input_coded_size, uint32_t output_buffer_size) {
GetForwardingInterface()->RequireBitstreamBuffers(std::move(input_count), std::move(input_coded_size), std::move(output_buffer_size));
}
void VideoEncodeAcceleratorClientInterceptorForTesting::BitstreamBufferReady(int32_t bitstream_buffer_id, BitstreamBufferMetadataPtr metadata) {
GetForwardingInterface()->BitstreamBufferReady(std::move(bitstream_buffer_id), std::move(metadata));
}
void VideoEncodeAcceleratorClientInterceptorForTesting::NotifyError(VideoEncodeAccelerator::Error error) {
GetForwardingInterface()->NotifyError(std::move(error));
}
void VideoEncodeAcceleratorClientInterceptorForTesting::NotifyEncoderInfoChange(::media::mojom::blink::VideoEncoderInfoPtr info) {
GetForwardingInterface()->NotifyEncoderInfoChange(std::move(info));
}
VideoEncodeAcceleratorClientAsyncWaiter::VideoEncodeAcceleratorClientAsyncWaiter(
VideoEncodeAcceleratorClient* proxy) : proxy_(proxy) {}
VideoEncodeAcceleratorClientAsyncWaiter::~VideoEncodeAcceleratorClientAsyncWaiter() = default;
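// VideoEncodeAcceleratorClient has no methods with replies, so its AsyncWaiter
// only holds the proxy pointer and exposes no blocking helpers.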
} // namespace blink
} // namespace mojom
} // namespace media
#if defined(__clang__)
#pragma clang diagnostic pop
#endif