blob: b09114ee1f2f0e11c33e55aa435b4b0958a79a57 [file] [log] [blame]
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A class to emulate GLES2 over command buffers.
#include "gpu/command_buffer/client/gles2_implementation.h"
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>
#include <GLES3/gl3.h>
#include <GLES3/gl31.h>
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include "base/atomic_sequence_num.h"
#include "base/bits.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/numerics/safe_math.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/client/buffer_tracker.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/client/program_info_manager.h"
#include "gpu/command_buffer/client/query_tracker.h"
#include "gpu/command_buffer/client/readback_buffer_shadow_tracker.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
#include "gpu/command_buffer/client/transfer_buffer.h"
#include "gpu/command_buffer/client/transfer_buffer_cmd_copy_helpers.h"
#include "gpu/command_buffer/client/vertex_array_object_manager.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/id_allocator.h"
#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/rect_f.h"
#if !defined(__native_client__)
#include "ui/gfx/color_space.h"
#include "ui/gfx/ipc/color/gfx_param_traits.h"
#endif
// Guard placed at the top of public GL entry points. Error callbacks are
// always deferred for the duration of the call (see DeferErrorCallbacks);
// debug builds additionally install SingleThreadChecker, which CHECKs
// against concurrent use of this object.
#if defined(GPU_CLIENT_DEBUG)
#define GPU_CLIENT_SINGLE_THREAD_CHECK() \
  DeferErrorCallbacks deferrer(this);    \
  SingleThreadChecker checker(this);
#else  // !defined(GPU_CLIENT_DEBUG)
#define GPU_CLIENT_SINGLE_THREAD_CHECK() DeferErrorCallbacks deferrer(this);
#endif  // defined(GPU_CLIENT_DEBUG)

// Check that destination pointers point to initialized memory.
// When the context is lost, calling GL function has no effect so if destination
// pointers point to initialized memory it can often lead to crash bugs. eg.
//
// GLsizei len;
// glGetShaderSource(shader, max_size, &len, buffer);
// std::string src(buffer, buffer + len);  // len can be uninitialized here!!!
//
// Because this check is not official GL this check happens only on Chrome code,
// not Pepper.
//
// If it was up to us we'd just always write to the destination but the OpenGL
// spec defines the behavior of OpenGL functions, not us. :-(
//
// Pick the strongest assertion macro available in this build configuration;
// native client and conformance-test builds compile the checks out entirely.
#if defined(__native_client__) || defined(GLES2_CONFORMANCE_TESTS)
#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v)
#define GPU_CLIENT_DCHECK(v)
#elif defined(GPU_DCHECK)
#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) GPU_DCHECK(v)
#define GPU_CLIENT_DCHECK(v) GPU_DCHECK(v)
#elif defined(DCHECK)
#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) DCHECK(v)
#define GPU_CLIENT_DCHECK(v) DCHECK(v)
#else
#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) ASSERT(v)
#define GPU_CLIENT_DCHECK(v) ASSERT(v)
#endif

// Asserts that the caller pre-set ptr[0] to 0 or -1 before the call, i.e.
// that the destination is not uninitialized garbage.
#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(type, ptr) \
  GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(          \
      ptr &&                                                     \
      (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));

// Same as above, but a null |ptr| is allowed.
#define GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(type, ptr) \
  GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(                   \
      !ptr ||                                                             \
      (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
namespace gpu {
namespace gles2 {
namespace {
// Copies |height| rows of pixel data from |pixels| into |buffer|.
// Each source row occupies |pixels_padded_row_size| bytes of which the first
// |unpadded_row_size| bytes are payload; destination rows are spaced
// |buffer_padded_row_size| bytes apart. When both paddings match, the copy
// collapses to a single memcpy covering every row plus the final row's
// payload (the last row's padding is deliberately not touched).
void CopyRectToBuffer(const void* pixels,
                      uint32_t height,
                      uint32_t unpadded_row_size,
                      uint32_t pixels_padded_row_size,
                      void* buffer,
                      uint32_t buffer_padded_row_size) {
  if (height == 0)
    return;
  const int8_t* src = static_cast<const int8_t*>(pixels);
  int8_t* dst = static_cast<int8_t*>(buffer);
  if (pixels_padded_row_size == buffer_padded_row_size) {
    // Identical row strides: everything up to the last row's payload is one
    // contiguous run.
    memcpy(dst, src,
           (height - 1) * pixels_padded_row_size + unpadded_row_size);
    return;
  }
  // Differing strides: copy each row's payload individually.
  for (uint32_t row = 0; row < height; ++row) {
    memcpy(dst, src, unpadded_row_size);
    src += pixels_padded_row_size;
    dst += buffer_padded_row_size;
  }
}
// Process-wide counter backing GenerateNextFlushId(); shared by every
// GLES2Implementation instance in this process.
static base::AtomicSequenceNumber g_flush_id;

// Returns the next flush id. Each call yields a new value from the shared
// atomic sequence, truncated to 32 bits.
uint32_t GenerateNextFlushId() {
  return static_cast<uint32_t>(g_flush_id.GetNext());
}
// True when |usage| is one of the *_READ buffer usage hints, i.e. the buffer
// contents are intended to be read back by the application.
bool IsReadbackUsage(GLenum usage) {
  switch (usage) {
    case GL_STREAM_READ:
    case GL_DYNAMIC_READ:
    case GL_STATIC_READ:
      return true;
    default:
      return false;
  }
}
} // anonymous namespace
GLES2Implementation::GLStaticState::GLStaticState() = default;

GLES2Implementation::GLStaticState::~GLStaticState() = default;

// While a DeferErrorCallbacks instance is alive, error messages are queued
// (see SendErrorMessage) instead of being delivered immediately; the queue is
// drained when the instance is destroyed. Not re-entrant: nested scopes
// DCHECK.
GLES2Implementation::DeferErrorCallbacks::DeferErrorCallbacks(
    GLES2Implementation* gles2_implementation)
    : gles2_implementation_(gles2_implementation) {
  DCHECK_EQ(false, gles2_implementation_->deferring_error_callbacks_);
  gles2_implementation_->deferring_error_callbacks_ = true;
}

GLES2Implementation::DeferErrorCallbacks::~DeferErrorCallbacks() {
  DCHECK_EQ(true, gles2_implementation_->deferring_error_callbacks_);
  gles2_implementation_->deferring_error_callbacks_ = false;
  gles2_implementation_->CallDeferredErrorCallbacks();
}

// One queued error message; see DeferErrorCallbacks above.
GLES2Implementation::DeferredErrorCallback::DeferredErrorCallback(
    std::string message,
    int32_t id)
    : message(std::move(message)), id(id) {}

// Debug guard that CHECKs this object is not entered concurrently/recursively:
// use_count_ must be 0 on construction and must return to 0 on destruction.
GLES2Implementation::SingleThreadChecker::SingleThreadChecker(
    GLES2Implementation* gles2_implementation)
    : gles2_implementation_(gles2_implementation) {
  CHECK_EQ(0, gles2_implementation_->use_count_);
  ++gles2_implementation_->use_count_;
}

GLES2Implementation::SingleThreadChecker::~SingleThreadChecker() {
  --gles2_implementation_->use_count_;
  CHECK_EQ(0, gles2_implementation_->use_count_);
}
// Constructs the client-side GLES2 emulation layer. Most state here mirrors
// GL state client-side (pack/unpack parameters, binding points, texture
// units) so queries can be answered without a service round trip. Real setup
// that can fail happens in Initialize(), which must be called next.
GLES2Implementation::GLES2Implementation(
    GLES2CmdHelper* helper,
    scoped_refptr<ShareGroup> share_group,
    TransferBufferInterface* transfer_buffer,
    bool bind_generates_resource,
    bool lose_context_when_out_of_memory,
    bool support_client_side_arrays,
    GpuControl* gpu_control)
    : ImplementationBase(helper, transfer_buffer, gpu_control),
      helper_(helper),
      chromium_framebuffer_multisample_(kUnknownExtensionStatus),
      pack_alignment_(4),
      pack_row_length_(0),
      pack_skip_pixels_(0),
      pack_skip_rows_(0),
      unpack_alignment_(4),
      unpack_row_length_(0),
      unpack_image_height_(0),
      unpack_skip_rows_(0),
      unpack_skip_pixels_(0),
      unpack_skip_images_(0),
      active_texture_unit_(0),
      bound_framebuffer_(0),
      bound_read_framebuffer_(0),
      bound_renderbuffer_(0),
      current_program_(0),
      bound_array_buffer_(0),
      bound_atomic_counter_buffer_(0),
      bound_copy_read_buffer_(0),
      bound_copy_write_buffer_(0),
      bound_pixel_pack_buffer_(0),
      bound_pixel_unpack_buffer_(0),
      bound_shader_storage_buffer_(0),
      bound_transform_feedback_buffer_(0),
      bound_uniform_buffer_(0),
      bound_pixel_pack_transfer_buffer_id_(0),
      bound_pixel_unpack_transfer_buffer_id_(0),
      error_bits_(0),
      lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
      support_client_side_arrays_(support_client_side_arrays),
      use_count_(0),
      flush_id_(0),
      max_extra_transfer_buffer_size_(0),
      current_trace_stack_(0),
      aggressively_free_resources_(false),
      cached_extension_string_(nullptr),
      weak_ptr_factory_(this) {
  DCHECK(helper);

  // Cache this object's address as hex text; used as the default log prefix
  // (see GetLogPrefix).
  std::stringstream ss;
  ss << std::hex << this;
  this_in_hex_ = ss.str();

  // Adopt the caller's share group, or create a private one for this context.
  share_group_ =
      (share_group ? std::move(share_group)
                   : new ShareGroup(
                         bind_generates_resource,
                         gpu_control_->GetCommandBufferID().GetUnsafeValue()));
  DCHECK(share_group_->bind_generates_resource() == bind_generates_resource);

  memset(&reserved_ids_, 0, sizeof(reserved_ids_));
}
// Completes construction: initializes the base class, caches shader
// precision formats and capability-derived limits, and creates the
// client-side trackers and id allocators. Returns kSuccess on success,
// a propagated failure from ImplementationBase::Initialize, or
// kFatalFailure when client and service disagree about
// GL_BIND_GENERATES_RESOURCE_CHROMIUM.
gpu::ContextResult GLES2Implementation::Initialize(
    const SharedMemoryLimits& limits) {
  TRACE_EVENT0("gpu", "GLES2Implementation::Initialize");
  auto result = ImplementationBase::Initialize(limits);
  if (result != gpu::ContextResult::kSuccess) {
    return result;
  }

  max_extra_transfer_buffer_size_ = limits.max_mapped_memory_for_texture_upload;

  // Cache precision formats so glGetShaderPrecisionFormat can be answered
  // without a service round trip.
  GLStaticState::ShaderPrecisionMap* shader_precisions =
      &static_state_.shader_precisions;
  capabilities_.VisitPrecisions(
      [shader_precisions](GLenum shader, GLenum type,
                          Capabilities::ShaderPrecision* result) {
        const GLStaticState::ShaderPrecisionKey key(shader, type);
        cmds::GetShaderPrecisionFormat::Result cached_result = {
            true, result->min_range, result->max_range, result->precision};
        shader_precisions->insert(std::make_pair(key, cached_result));
      });

  util_.set_num_compressed_texture_formats(
      capabilities_.num_compressed_texture_formats);
  util_.set_num_shader_binary_formats(capabilities_.num_shader_binary_formats);

  texture_units_ = std::make_unique<TextureUnit[]>(
      capabilities_.max_combined_texture_image_units);

  buffer_tracker_ = std::make_unique<BufferTracker>(mapped_memory_.get());
  readback_buffer_shadow_tracker_ =
      std::make_unique<ReadbackBufferShadowTracker>(mapped_memory_.get(),
                                                    helper_);

  // std::make_unique for consistency with the allocations above.
  for (int i = 0; i < static_cast<int>(IdNamespaces::kNumIdNamespaces); ++i)
    id_allocators_[i] = std::make_unique<IdAllocator>();

  if (support_client_side_arrays_) {
    // Reserve buffer ids used to emulate client-side vertex arrays.
    GetIdHandler(SharedIdNamespaces::kBuffers)
        ->MakeIds(this, kClientSideArrayId, arraysize(reserved_ids_),
                  &reserved_ids_[0]);
  }

  vertex_array_object_manager_ = std::make_unique<VertexArrayObjectManager>(
      capabilities_.max_vertex_attribs, reserved_ids_[0], reserved_ids_[1],
      support_client_side_arrays_);

  // GL_BIND_GENERATES_RESOURCE_CHROMIUM state must be the same
  // on Client & Service.
  if (capabilities_.bind_generates_resource_chromium !=
      (share_group_->bind_generates_resource() ? 1 : 0)) {
    SetGLError(GL_INVALID_OPERATION, "Initialize",
               "Service bind_generates_resource mismatch.");
    LOG(ERROR) << "ContextResult::kFatalFailure: "
               << "bind_generates_resource mismatch";
    return gpu::ContextResult::kFatalFailure;
  }

  return gpu::ContextResult::kSuccess;
}
// Tears down in a careful order: pending queries must finish before their
// shared memory is released, and per-context share-group data must be freed
// before the trackers that back it go away.
GLES2Implementation::~GLES2Implementation() {
  // Make sure the queries are finished otherwise we'll delete the
  // shared memory (mapped_memory_) which will free the memory used
  // by the queries. The GPU process when validating that memory is still
  // shared will fail and abort (ie, it will stop running).
  WaitForCmd();
  query_tracker_.reset();

  // GLES2Implementation::Initialize() could fail before allocating
  // reserved_ids_, so we need delete them carefully.
  if (support_client_side_arrays_ && reserved_ids_[0]) {
    DeleteBuffers(arraysize(reserved_ids_), &reserved_ids_[0]);
  }

  // Release remaining BufferRange mem; This is when a MapBufferRange() is
  // called but not the UnmapBuffer() pair.
  ClearMappedBufferRangeMap();

  // Release any per-context data in share group.
  share_group_->FreeContext(this);

  buffer_tracker_.reset();
  readback_buffer_shadow_tracker_.reset();

  // Make sure the commands make it the service.
  WaitForCmd();
}
// Accessor for the command helper this implementation writes commands to.
GLES2CmdHelper* GLES2Implementation::helper() const {
  return helper_;
}

// Ids shared across contexts are handed out by the share group.
IdHandlerInterface* GLES2Implementation::GetIdHandler(
    SharedIdNamespaces namespace_id) const {
  return share_group_->GetIdHandler(namespace_id);
}

RangeIdHandlerInterface* GLES2Implementation::GetRangeIdHandler(
    int namespace_id) const {
  return share_group_->GetRangeIdHandler(namespace_id);
}

// Per-context (non-shared) id allocators, created in Initialize().
IdAllocator* GLES2Implementation::GetIdAllocator(
    IdNamespaces namespace_id) const {
  return id_allocators_[static_cast<int>(namespace_id)].get();
}
// Notification that the context was lost; marks the share group lost and
// runs the client's lost-context callback (at most once).
void GLES2Implementation::OnGpuControlLostContext() {
  // This should never occur more than once.
  DCHECK(!lost_context_callback_run_);
  lost_context_callback_run_ = true;
  share_group_->Lose();
  if (!lost_context_callback_.is_null()) {
    std::move(lost_context_callback_).Run();
  }
}

void GLES2Implementation::OnGpuControlLostContextMaybeReentrant() {
  // Queries for lost context state should immediately reflect reality,
  // but don't call out to clients yet to avoid them re-entering this
  // class.
  share_group_->Lose();
}
// Forwards a service-side error message into the client callback machinery
// (possibly deferred; see SendErrorMessage).
void GLES2Implementation::OnGpuControlErrorMessage(const char* message,
                                                   int32_t id) {
  SendErrorMessage(message, id);
}

// Runs and removes the swap-completion callback registered for this swap id,
// if any. Unknown ids are ignored.
void GLES2Implementation::OnGpuControlSwapBuffersCompleted(
    const SwapBuffersCompleteParams& params) {
  auto found = pending_swap_callbacks_.find(params.swap_response.swap_id);
  if (found == pending_swap_callbacks_.end())
    return;

  std::move(found->second).Run(params);
  pending_swap_callbacks_.erase(found);
}
// Delivers an error message to the client callback, or queues it when a
// DeferErrorCallbacks scope is active. Dropped silently if no callback is
// installed.
void GLES2Implementation::SendErrorMessage(std::string message, int32_t id) {
  if (error_message_callback_.is_null())
    return;

  if (deferring_error_callbacks_) {
    deferred_error_callbacks_.emplace_back(std::move(message), id);
    return;
  }

  error_message_callback_.Run(message.c_str(), id);
}
// Drains messages queued while a DeferErrorCallbacks scope was active and
// delivers them to the client callback. The queue is swapped into a local
// first so callbacks that synthesize further errors re-queue instead of
// invalidating the container being iterated.
void GLES2Implementation::CallDeferredErrorCallbacks() {
  if (deferred_error_callbacks_.empty())
    return;

  if (error_message_callback_.is_null()) {
    // User probably cleared this out.
    deferred_error_callbacks_.clear();
    return;
  }

  std::deque<DeferredErrorCallback> local_callbacks;
  std::swap(deferred_error_callbacks_, local_callbacks);
  // By const reference: each entry owns a std::string, so iterating by value
  // would copy every message.
  for (const auto& c : local_callbacks) {
    error_message_callback_.Run(c.message.c_str(), c.id);
  }
}
// Runs and removes the presentation-feedback callback registered for
// |swap_id|, if any. Unknown ids are ignored.
void GLES2Implementation::OnSwapBufferPresented(
    uint64_t swap_id,
    const gfx::PresentationFeedback& feedback) {
  auto found = pending_presentation_callbacks_.find(swap_id);
  if (found == pending_presentation_callbacks_.end())
    return;

  std::move(found->second).Run(feedback);
  pending_presentation_callbacks_.erase(found);
}
// Schedules |mem| for release once the service has consumed all commands
// issued so far (the freshly inserted token is the fence).
void GLES2Implementation::FreeSharedMemory(void* mem) {
  mapped_memory_->FreePendingToken(mem, helper_->InsertToken());
}
// Allocates a new client id for a GPU fence and asks the service to create
// the fence. Ids are strictly increasing and never reused.
GLuint GLES2Implementation::CreateGpuFenceCHROMIUM() {
  GLuint client_id = GetIdAllocator(IdNamespaces::kGpuFences)
                         ->AllocateIDAtOrAbove(last_gpu_fence_id_ + 1);
  // Out of paranoia, don't allow IDs to wrap around to avoid potential
  // collisions on reuse. The space of 2^32 IDs is enough for over a year of
  // allocating two per frame at 60fps. TODO(crbug.com/790550): Revisit if this
  // is an issue, for example by deferring ID release if they would be reissued
  // too soon.
  CHECK(client_id > last_gpu_fence_id_) << "ID wrap prevented";
  last_gpu_fence_id_ = client_id;
  helper_->CreateGpuFenceINTERNAL(client_id);
  GPU_CLIENT_LOG("returned " << client_id);
  CheckGLError();
  return client_id;
}
// Like CreateGpuFenceCHROMIUM, but the fence wraps an existing client-owned
// |source| fence object and is registered via GpuControl rather than the
// command stream.
GLuint GLES2Implementation::CreateClientGpuFenceCHROMIUM(
    ClientGpuFence source) {
  GLuint client_id = GetIdAllocator(IdNamespaces::kGpuFences)
                         ->AllocateIDAtOrAbove(last_gpu_fence_id_ + 1);
  // See CreateGpuFenceCHROMIUM comment re wraparound.
  CHECK(client_id > last_gpu_fence_id_) << "ID wrap prevented";
  last_gpu_fence_id_ = client_id;

  // Create the service-side GpuFenceEntry via gpu_control. This is guaranteed
  // to arrive before any future GL helper_ commands on this stream, so it's
  // safe to use the client_id generated here in following commands such as
  // WaitGpuFenceCHROMIUM without explicit flushing.
  gpu_control_->CreateGpuFence(client_id, source);

  GPU_CLIENT_LOG("returned " << client_id);
  CheckGLError();
  return client_id;
}
// Releases the client id for |client_id| and tells the service to destroy
// the fence. Ids not allocated by this context raise GL_INVALID_VALUE.
void GLES2Implementation::DestroyGpuFenceCHROMIUMHelper(GLuint client_id) {
  IdAllocator* allocator = GetIdAllocator(IdNamespaces::kGpuFences);
  if (!allocator->InUse(client_id)) {
    SetGLError(GL_INVALID_VALUE, "glDestroyGpuFenceCHROMIUM",
               "id not created by this context.");
    return;
  }
  allocator->FreeID(client_id);
  helper_->DestroyGpuFenceCHROMIUM(client_id);
}
// Toggles aggressive freeing of transfer-buffer/cache resources. When
// enabling (and a ring buffer exists), driver caches are flushed and a full
// Flush() is issued so the transfer buffer resources are actually released;
// otherwise only a shallow flush is performed.
void GLES2Implementation::SetAggressivelyFreeResources(
    bool aggressively_free_resources) {
  TRACE_EVENT1("gpu", "GLES2Implementation::SetAggressivelyFreeResources",
               "aggressively_free_resources", aggressively_free_resources);
  aggressively_free_resources_ = aggressively_free_resources;

  if (aggressively_free_resources_ && helper_->HaveRingBuffer()) {
    // Ensure that we clean up as much cache memory as possible and fully flush.
    FlushDriverCachesCHROMIUM();

    // Flush will delete transfer buffer resources if
    // |aggressively_free_resources_| is true.
    Flush();
  } else {
    ShallowFlushCHROMIUM();
  }
}
// Returns true when |ext| appears as a whole, space-delimited token in the
// GL_EXTENSIONS string. Returns false when the extension string is
// unavailable (e.g. lost context).
bool GLES2Implementation::IsExtensionAvailable(const char* ext) {
  const char* extensions =
      reinterpret_cast<const char*>(GetStringHelper(GL_EXTENSIONS));
  if (!extensions)
    return false;

  // Use size_t throughout: strlen()/strcspn() return size_t, and storing
  // them in int risks truncation and signed/unsigned comparison.
  const size_t length = strlen(ext);
  while (true) {
    const size_t n = strcspn(extensions, " ");
    // Token matches only if lengths agree and the bytes compare equal;
    // this rejects prefixes like "GL_EXT_foo" vs "GL_EXT_foobar".
    if (n == length && 0 == strncmp(ext, extensions, length)) {
      return true;
    }
    if ('\0' == extensions[n]) {
      return false;
    }
    extensions += n + 1;
  }
}
// Memoizing wrapper around IsExtensionAvailable: |*status| caches the
// answer so the extension string is parsed at most once per flag.
bool GLES2Implementation::IsExtensionAvailableHelper(const char* extension,
                                                     ExtensionStatus* status) {
  if (*status == kAvailableExtensionStatus)
    return true;
  if (*status == kUnavailableExtensionStatus)
    return false;
  // Unknown yet: query once and remember the result.
  const bool available = IsExtensionAvailable(extension);
  *status =
      available ? kAvailableExtensionStatus : kUnavailableExtensionStatus;
  return available;
}
// Cached check for GL_CHROMIUM_framebuffer_multisample support.
bool GLES2Implementation::IsChromiumFramebufferMultisampleAvailable() {
  return IsExtensionAvailableHelper("GL_CHROMIUM_framebuffer_multisample",
                                    &chromium_framebuffer_multisample_);
}
// Prefix used by GPU_CLIENT_LOG: the current debug marker if one is set,
// otherwise this object's address in hex.
// NOTE(review): returning by const& assumes GetMarker() itself returns a
// reference to storage owned by debug_marker_manager_ — confirm; if it
// returned by value this binding would dangle.
const std::string& GLES2Implementation::GetLogPrefix() const {
  const std::string& prefix(debug_marker_manager_.GetMarker());
  return prefix.empty() ? this_in_hex_ : prefix;
}
// Public glGetError entry point; delegates to GetGLError which merges
// service-side and client-synthesized errors.
GLenum GLES2Implementation::GetError() {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetError()");
  GLenum err = GetGLError();
  GPU_CLIENT_LOG("returned " << GLES2Util::GetStringError(err));
  return err;
}
// Synchronously fetches the service-side GL error. If the service reports
// GL_NO_ERROR, falls back to the client-synthesized error bits; otherwise
// clears the matching client-side bit so the same error is not reported
// twice. Returns GL_NO_ERROR if no result memory can be allocated (treated
// as lost context).
GLenum GLES2Implementation::GetGLError() {
  TRACE_EVENT0("gpu", "GLES2::GetGLError");
  // Check the GL error first, then our wrapped error.
  typedef cmds::GetError::Result Result;
  auto result = GetResultAs<Result>();
  // If we couldn't allocate a result the context is lost.
  if (!result) {
    return GL_NO_ERROR;
  }
  *result = GL_NO_ERROR;
  helper_->GetError(GetResultShmId(), result.offset());
  WaitForCmd();
  GLenum error = *result;
  if (error == GL_NO_ERROR) {
    error = GetClientSideGLError();
  } else {
    // There was an error, clear the corresponding wrapped error.
    error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
  }
  return error;
}
#if defined(GL_CLIENT_FAIL_GL_ERRORS)
// Debug aid: turn any GL error into a hard failure.
void GLES2Implementation::FailGLError(GLenum error) {
  if (error != GL_NO_ERROR) {
    NOTREACHED() << "Error";
  }
}
// NOTE: Calling GetGLError overwrites data in the result buffer.
void GLES2Implementation::CheckGLError() {
  FailGLError(GetGLError());
}
#endif  // defined(GL_CLIENT_FAIL_GL_ERRORS)
// Records a client-synthesized GL error: remembers the message, notifies the
// error callback, sets the corresponding error bit (returned by a later
// glGetError), and — when configured — deliberately loses the context on
// GL_OUT_OF_MEMORY.
void GLES2Implementation::SetGLError(GLenum error,
                                     const char* function_name,
                                     const char* msg) {
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] Client Synthesized Error: "
                     << GLES2Util::GetStringError(error) << ": "
                     << function_name << ": " << msg);
  FailGLError(error);
  if (msg) {
    last_error_ = msg;
  }
  if (!error_message_callback_.is_null()) {
    std::string temp(GLES2Util::GetStringError(error) + " : " + function_name +
                     ": " + (msg ? msg : ""));
    // Move the string instead of round-tripping through c_str(), which
    // forced SendErrorMessage's std::string parameter to re-copy it.
    SendErrorMessage(std::move(temp), 0);
  }
  error_bits_ |= GLES2Util::GLErrorToErrorBit(error);
  if (error == GL_OUT_OF_MEMORY && lose_context_when_out_of_memory_) {
    helper_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
                                 GL_UNKNOWN_CONTEXT_RESET_ARB);
  }
}
// Convenience wrapper for the common "<label> was <enum>" GL_INVALID_ENUM
// error.
void GLES2Implementation::SetGLErrorInvalidEnum(const char* function_name,
                                                GLenum value,
                                                const char* label) {
  std::string message(label);
  message += " was ";
  message += GLES2Util::GetStringEnum(value);
  SetGLError(GL_INVALID_ENUM, function_name, message.c_str());
}
// glDisable. The command is forwarded to the service unless the client-side
// state tracker both recognizes |cap| and reports the state is already
// disabled (i.e. skip only when tracked and unchanged).
void GLES2Implementation::Disable(GLenum cap) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDisable("
                     << GLES2Util::GetStringCapability(cap) << ")");
  bool changed = false;
  if (!state_.SetCapabilityState(cap, false, &changed) || changed) {
    helper_->Disable(cap);
  }
  CheckGLError();
}
// glEnable. Mirror image of Disable(): forwarded unless the tracker knows
// |cap| and it was already enabled.
void GLES2Implementation::Enable(GLenum cap) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnable("
                     << GLES2Util::GetStringCapability(cap) << ")");
  bool changed = false;
  if (!state_.SetCapabilityState(cap, true, &changed) || changed) {
    helper_->Enable(cap);
  }
  CheckGLError();
}
// glIsEnabled. Answered from the client-side state tracker when possible;
// otherwise performs a synchronous query via shared result memory. Returns
// GL_FALSE if result memory cannot be allocated (lost context).
GLboolean GLES2Implementation::IsEnabled(GLenum cap) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnabled("
                     << GLES2Util::GetStringCapability(cap) << ")");
  bool state = false;
  if (!state_.GetEnabled(cap, &state)) {
    typedef cmds::IsEnabled::Result Result;
    auto result = GetResultAs<Result>();
    if (!result) {
      return GL_FALSE;
    }
    *result = 0;
    helper_->IsEnabled(cap, GetResultShmId(), result.offset());
    WaitForCmd();
    state = (*result) != 0;
  }
  GPU_CLIENT_LOG("returned " << state);
  CheckGLError();
  return state;
}
// Attempts to answer a glGetIntegerv-style query for |pname| from client-side
// state and cached capabilities, writing the value(s) into |params|.
// Returns true when the value was served locally; returns false when the
// caller must fall through and query the service/driver (either because the
// pname is deliberately non-cached, or the cache is not populated yet).
// Organized in three tiers gated by context version: ES2, ES3, ES3.1.
bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
  // TODO(zmo): For all the BINDING points, there is a possibility where
  // resources are shared among multiple contexts, that the cached points
  // are invalid. It is not a problem for now, but once we allow resource
  // sharing in WebGL, we need to implement a mechanism to allow correct
  // client side binding points tracking. crbug.com/465562.

  // ES2 parameters.
  switch (pname) {
    case GL_ACTIVE_TEXTURE:
      *params = active_texture_unit_ + GL_TEXTURE0;
      return true;
    case GL_ARRAY_BUFFER_BINDING:
      *params = bound_array_buffer_;
      return true;
    case GL_ELEMENT_ARRAY_BUFFER_BINDING:
      *params = vertex_array_object_manager_->bound_element_array_buffer();
      return true;
    case GL_FRAMEBUFFER_BINDING:
      *params = bound_framebuffer_;
      return true;
    case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
      *params = capabilities_.max_combined_texture_image_units;
      return true;
    case GL_MAX_CUBE_MAP_TEXTURE_SIZE:
      *params = capabilities_.max_cube_map_texture_size;
      return true;
    case GL_MAX_FRAGMENT_UNIFORM_VECTORS:
      *params = capabilities_.max_fragment_uniform_vectors;
      return true;
    case GL_MAX_RENDERBUFFER_SIZE:
      *params = capabilities_.max_renderbuffer_size;
      return true;
    case GL_MAX_TEXTURE_IMAGE_UNITS:
      *params = capabilities_.max_texture_image_units;
      return true;
    case GL_MAX_TEXTURE_SIZE:
      *params = capabilities_.max_texture_size;
      return true;
    case GL_MAX_VARYING_VECTORS:
      *params = capabilities_.max_varying_vectors;
      return true;
    case GL_MAX_VERTEX_ATTRIBS:
      *params = capabilities_.max_vertex_attribs;
      return true;
    case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
      *params = capabilities_.max_vertex_texture_image_units;
      return true;
    case GL_MAX_VERTEX_UNIFORM_VECTORS:
      *params = capabilities_.max_vertex_uniform_vectors;
      return true;
    case GL_MAX_VIEWPORT_DIMS:
      if (capabilities_.max_viewport_width > 0 &&
          capabilities_.max_viewport_height > 0) {
        params[0] = capabilities_.max_viewport_width;
        params[1] = capabilities_.max_viewport_height;
        return true;
      }
      // If they are not cached on the client side yet, query the service side.
      return false;
    case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
      *params = capabilities_.num_compressed_texture_formats;
      return true;
    case GL_NUM_SHADER_BINARY_FORMATS:
      *params = capabilities_.num_shader_binary_formats;
      return true;
    case GL_RENDERBUFFER_BINDING:
      *params = bound_renderbuffer_;
      return true;
    case GL_TEXTURE_BINDING_2D:
      *params = texture_units_[active_texture_unit_].bound_texture_2d;
      return true;
    case GL_TEXTURE_BINDING_CUBE_MAP:
      *params = texture_units_[active_texture_unit_].bound_texture_cube_map;
      return true;

    // Non-standard parameters.
    case GL_TEXTURE_BINDING_EXTERNAL_OES:
      *params = texture_units_[active_texture_unit_].bound_texture_external_oes;
      return true;
    case GL_TEXTURE_BINDING_RECTANGLE_ARB:
      *params =
          texture_units_[active_texture_unit_].bound_texture_rectangle_arb;
      return true;
    case GL_PIXEL_PACK_TRANSFER_BUFFER_BINDING_CHROMIUM:
      *params = bound_pixel_pack_transfer_buffer_id_;
      return true;
    case GL_PIXEL_UNPACK_TRANSFER_BUFFER_BINDING_CHROMIUM:
      *params = bound_pixel_unpack_transfer_buffer_id_;
      return true;
    case GL_READ_FRAMEBUFFER_BINDING:
      // Only answerable client-side when separate read framebuffers exist
      // (ES3 or the multisample extension).
      if (capabilities_.major_version >= 3 ||
          IsChromiumFramebufferMultisampleAvailable()) {
        *params = bound_read_framebuffer_;
        return true;
      }
      break;
    case GL_TIMESTAMP_EXT:
      // We convert all GPU timestamps to CPU time.
      // Value is nanoseconds saturated into a 32-bit GLint.
      *params = base::saturated_cast<GLint>(
          (base::TimeTicks::Now() - base::TimeTicks()).InMicroseconds() *
          base::Time::kNanosecondsPerMicrosecond);
      return true;
    case GL_GPU_DISJOINT_EXT:
      *params = static_cast<GLint>(query_tracker_->CheckAndResetDisjoint());
      return true;

    case GL_VIEWPORT:
      if (state_.viewport_width > 0 && state_.viewport_height > 0 &&
          capabilities_.max_viewport_width > 0 &&
          capabilities_.max_viewport_height > 0) {
        params[0] = state_.viewport_x;
        params[1] = state_.viewport_y;
        params[2] =
            std::min(state_.viewport_width, capabilities_.max_viewport_width);
        params[3] =
            std::min(state_.viewport_height, capabilities_.max_viewport_height);
        return true;
      }
      // If they haven't been cached on the client side, go to service side
      // to query the underlying driver.
      return false;

    // Non-cached parameters.
    case GL_ALIASED_LINE_WIDTH_RANGE:
    case GL_ALIASED_POINT_SIZE_RANGE:
    case GL_ALPHA_BITS:
    case GL_BLEND:
    case GL_BLEND_COLOR:
    case GL_BLEND_DST_ALPHA:
    case GL_BLEND_DST_RGB:
    case GL_BLEND_EQUATION_ALPHA:
    case GL_BLEND_EQUATION_RGB:
    case GL_BLEND_SRC_ALPHA:
    case GL_BLEND_SRC_RGB:
    case GL_BLUE_BITS:
    case GL_COLOR_CLEAR_VALUE:
    case GL_COLOR_WRITEMASK:
    case GL_COMPRESSED_TEXTURE_FORMATS:
    case GL_CULL_FACE:
    case GL_CULL_FACE_MODE:
    case GL_CURRENT_PROGRAM:
    case GL_DEPTH_BITS:
    case GL_DEPTH_CLEAR_VALUE:
    case GL_DEPTH_FUNC:
    case GL_DEPTH_RANGE:
    case GL_DEPTH_TEST:
    case GL_DEPTH_WRITEMASK:
    case GL_DITHER:
    case GL_FRONT_FACE:
    case GL_GENERATE_MIPMAP_HINT:
    case GL_GREEN_BITS:
    case GL_IMPLEMENTATION_COLOR_READ_FORMAT:
    case GL_IMPLEMENTATION_COLOR_READ_TYPE:
    case GL_LINE_WIDTH:
    case GL_PACK_ALIGNMENT:
    case GL_POLYGON_OFFSET_FACTOR:
    case GL_POLYGON_OFFSET_FILL:
    case GL_POLYGON_OFFSET_UNITS:
    case GL_RED_BITS:
    case GL_SAMPLE_ALPHA_TO_COVERAGE:
    case GL_SAMPLE_BUFFERS:
    case GL_SAMPLE_COVERAGE:
    case GL_SAMPLE_COVERAGE_INVERT:
    case GL_SAMPLE_COVERAGE_VALUE:
    case GL_SAMPLES:
    case GL_SCISSOR_BOX:
    case GL_SCISSOR_TEST:
    case GL_SHADER_BINARY_FORMATS:
    case GL_SHADER_COMPILER:
    case GL_STENCIL_BACK_FAIL:
    case GL_STENCIL_BACK_FUNC:
    case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
    case GL_STENCIL_BACK_PASS_DEPTH_PASS:
    case GL_STENCIL_BACK_REF:
    case GL_STENCIL_BACK_VALUE_MASK:
    case GL_STENCIL_BACK_WRITEMASK:
    case GL_STENCIL_BITS:
    case GL_STENCIL_CLEAR_VALUE:
    case GL_STENCIL_FAIL:
    case GL_STENCIL_FUNC:
    case GL_STENCIL_PASS_DEPTH_FAIL:
    case GL_STENCIL_PASS_DEPTH_PASS:
    case GL_STENCIL_REF:
    case GL_STENCIL_TEST:
    case GL_STENCIL_VALUE_MASK:
    case GL_STENCIL_WRITEMASK:
    case GL_SUBPIXEL_BITS:
    case GL_UNPACK_ALIGNMENT:
      return false;
    default:
      break;
  }

  // ES3 and up only beyond this point.
  if (capabilities_.major_version < 3) {
    return false;
  }

  // ES3 parameters.
  switch (pname) {
    case GL_COPY_READ_BUFFER_BINDING:
      *params = bound_copy_read_buffer_;
      return true;
    case GL_COPY_WRITE_BUFFER_BINDING:
      *params = bound_copy_write_buffer_;
      return true;
    case GL_MAJOR_VERSION:
      *params = capabilities_.major_version;
      return true;
    case GL_MAX_3D_TEXTURE_SIZE:
      *params = capabilities_.max_3d_texture_size;
      return true;
    case GL_MAX_ARRAY_TEXTURE_LAYERS:
      *params = capabilities_.max_array_texture_layers;
      return true;
    case GL_MAX_COLOR_ATTACHMENTS:
      *params = capabilities_.max_color_attachments;
      return true;
    case GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS:
      *params = static_cast<GLint>(
          capabilities_.max_combined_fragment_uniform_components);
      return true;
    case GL_MAX_COMBINED_UNIFORM_BLOCKS:
      *params = capabilities_.max_combined_uniform_blocks;
      return true;
    case GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS:
      *params = static_cast<GLint>(
          capabilities_.max_combined_vertex_uniform_components);
      return true;
    case GL_MAX_DRAW_BUFFERS:
      *params = capabilities_.max_draw_buffers;
      return true;
    case GL_MAX_ELEMENT_INDEX:
      *params = static_cast<GLint>(capabilities_.max_element_index);
      return true;
    case GL_MAX_ELEMENTS_INDICES:
      *params = capabilities_.max_elements_indices;
      return true;
    case GL_MAX_ELEMENTS_VERTICES:
      *params = capabilities_.max_elements_vertices;
      return true;
    case GL_MAX_FRAGMENT_INPUT_COMPONENTS:
      *params = capabilities_.max_fragment_input_components;
      return true;
    case GL_MAX_FRAGMENT_UNIFORM_BLOCKS:
      *params = capabilities_.max_fragment_uniform_blocks;
      return true;
    case GL_MAX_FRAGMENT_UNIFORM_COMPONENTS:
      *params = capabilities_.max_fragment_uniform_components;
      return true;
    case GL_MAX_PROGRAM_TEXEL_OFFSET:
      *params = capabilities_.max_program_texel_offset;
      return true;
    case GL_MAX_SAMPLES:
      *params = capabilities_.max_samples;
      return true;
    case GL_MAX_SERVER_WAIT_TIMEOUT:
      *params = static_cast<GLint>(capabilities_.max_server_wait_timeout);
      return true;
    case GL_MAX_TEXTURE_LOD_BIAS:
      *params = static_cast<GLint>(capabilities_.max_texture_lod_bias);
      return true;
    case GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS:
      *params = capabilities_.max_transform_feedback_interleaved_components;
      return true;
    case GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS:
      *params = capabilities_.max_transform_feedback_separate_attribs;
      return true;
    case GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS:
      *params = capabilities_.max_transform_feedback_separate_components;
      return true;
    case GL_MAX_UNIFORM_BLOCK_SIZE:
      *params = static_cast<GLint>(capabilities_.max_uniform_block_size);
      return true;
    case GL_MAX_UNIFORM_BUFFER_BINDINGS:
      *params = capabilities_.max_uniform_buffer_bindings;
      return true;
    case GL_MAX_VARYING_COMPONENTS:
      *params = capabilities_.max_varying_components;
      return true;
    case GL_MAX_VERTEX_OUTPUT_COMPONENTS:
      *params = capabilities_.max_vertex_output_components;
      return true;
    case GL_MAX_VERTEX_UNIFORM_BLOCKS:
      *params = capabilities_.max_vertex_uniform_blocks;
      return true;
    case GL_MAX_VERTEX_UNIFORM_COMPONENTS:
      *params = capabilities_.max_vertex_uniform_components;
      return true;
    case GL_MIN_PROGRAM_TEXEL_OFFSET:
      *params = capabilities_.min_program_texel_offset;
      return true;
    case GL_MINOR_VERSION:
      *params = capabilities_.minor_version;
      return true;
    case GL_NUM_EXTENSIONS:
      UpdateCachedExtensionsIfNeeded();
      *params = cached_extensions_.size();
      return true;
    case GL_NUM_PROGRAM_BINARY_FORMATS:
      *params = capabilities_.num_program_binary_formats;
      return true;
    case GL_PACK_SKIP_PIXELS:
      *params = pack_skip_pixels_;
      return true;
    case GL_PACK_SKIP_ROWS:
      *params = pack_skip_rows_;
      return true;
    case GL_PIXEL_PACK_BUFFER_BINDING:
      *params = bound_pixel_pack_buffer_;
      return true;
    case GL_PIXEL_UNPACK_BUFFER_BINDING:
      *params = bound_pixel_unpack_buffer_;
      return true;
    case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
      *params = bound_transform_feedback_buffer_;
      return true;
    case GL_UNIFORM_BUFFER_BINDING:
      *params = bound_uniform_buffer_;
      return true;
    case GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT:
      *params = capabilities_.uniform_buffer_offset_alignment;
      return true;
    case GL_UNPACK_SKIP_IMAGES:
      *params = unpack_skip_images_;
      return true;
    case GL_UNPACK_SKIP_PIXELS:
      *params = unpack_skip_pixels_;
      return true;
    case GL_UNPACK_SKIP_ROWS:
      *params = unpack_skip_rows_;
      return true;

    // Non-cached ES3 parameters.
    case GL_DRAW_BUFFER0:
    case GL_DRAW_BUFFER1:
    case GL_DRAW_BUFFER2:
    case GL_DRAW_BUFFER3:
    case GL_DRAW_BUFFER4:
    case GL_DRAW_BUFFER5:
    case GL_DRAW_BUFFER6:
    case GL_DRAW_BUFFER7:
    case GL_DRAW_BUFFER8:
    case GL_DRAW_BUFFER9:
    case GL_DRAW_BUFFER10:
    case GL_DRAW_BUFFER11:
    case GL_DRAW_BUFFER12:
    case GL_DRAW_BUFFER13:
    case GL_DRAW_BUFFER14:
    case GL_DRAW_BUFFER15:
    case GL_DRAW_FRAMEBUFFER_BINDING:
    case GL_FRAGMENT_SHADER_DERIVATIVE_HINT:
    case GL_PACK_ROW_LENGTH:
    case GL_PRIMITIVE_RESTART_FIXED_INDEX:
    case GL_PROGRAM_BINARY_FORMATS:
    case GL_RASTERIZER_DISCARD:
    case GL_READ_BUFFER:
    case GL_READ_FRAMEBUFFER_BINDING:
    case GL_SAMPLER_BINDING:
    case GL_TEXTURE_BINDING_2D_ARRAY:
    case GL_TEXTURE_BINDING_3D:
    case GL_TRANSFORM_FEEDBACK_BINDING:
    case GL_TRANSFORM_FEEDBACK_ACTIVE:
    case GL_TRANSFORM_FEEDBACK_PAUSED:
    case GL_TRANSFORM_FEEDBACK_BUFFER_SIZE:
    case GL_TRANSFORM_FEEDBACK_BUFFER_START:
    case GL_UNIFORM_BUFFER_SIZE:
    case GL_UNIFORM_BUFFER_START:
    case GL_UNPACK_IMAGE_HEIGHT:
    case GL_UNPACK_ROW_LENGTH:
    case GL_VERTEX_ARRAY_BINDING:
      return false;
    default:
      break;
  }

  // ES3.1 and up only beyond this point.
  if (capabilities_.minor_version < 1) {
    return false;
  }

  // ES31 parameters.
  switch (pname) {
    case GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS:
      *params = capabilities_.max_atomic_counter_buffer_bindings;
      return true;
    case GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS:
      *params = capabilities_.max_shader_storage_buffer_bindings;
      return true;
    case GL_ATOMIC_COUNTER_BUFFER_BINDING:
      *params = bound_atomic_counter_buffer_;
      return true;
    case GL_SHADER_STORAGE_BUFFER_BINDING:
      *params = bound_shader_storage_buffer_;
      return true;
    case GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT:
      *params = capabilities_.shader_storage_buffer_offset_alignment;
      return true;

    // Non-cached ES31 parameters.
    case GL_ATOMIC_COUNTER_BUFFER_SIZE:
    case GL_ATOMIC_COUNTER_BUFFER_START:
    case GL_SHADER_STORAGE_BUFFER_SIZE:
    case GL_SHADER_STORAGE_BUFFER_START:
      return false;
    default:
      return false;
  }
}
// Answers glGetBooleanv from the client-side integer parameter cache.
// Returns false when |pname| is not cached, so the caller must query the
// service instead.
// TODO(gman): Make this handle pnames that return more than 1 value.
bool GLES2Implementation::GetBooleanvHelper(GLenum pname, GLboolean* params) {
  GLint cached_value;
  if (GetHelper(pname, &cached_value)) {
    *params = static_cast<GLboolean>(cached_value);
    return true;
  }
  return false;
}
// Answers glGetFloatv from client-side caches. GL_MAX_TEXTURE_LOD_BIAS is
// the only natively-float cached value; everything else is converted from
// the shared integer cache. Returns false when not cached.
// TODO(gman): Make this handle pnames that return more than 1 value.
bool GLES2Implementation::GetFloatvHelper(GLenum pname, GLfloat* params) {
  if (pname == GL_MAX_TEXTURE_LOD_BIAS) {
    *params = capabilities_.max_texture_lod_bias;
    return true;
  }
  GLint cached_value;
  if (!GetHelper(pname, &cached_value))
    return false;
  *params = static_cast<GLfloat>(cached_value);
  return true;
}
// Answers glGetInteger64v queries that can be satisfied client side from the
// cached Capabilities struct, falling back to the shared 32-bit parameter
// cache. Returns false when the value must be fetched from the service.
bool GLES2Implementation::GetInteger64vHelper(GLenum pname, GLint64* params) {
  switch (pname) {
    case GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS:
      *params = capabilities_.max_combined_fragment_uniform_components;
      return true;
    case GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS:
      *params = capabilities_.max_combined_vertex_uniform_components;
      return true;
    case GL_MAX_ELEMENT_INDEX:
      *params = capabilities_.max_element_index;
      return true;
    case GL_MAX_SERVER_WAIT_TIMEOUT:
      *params = capabilities_.max_server_wait_timeout;
      return true;
    case GL_MAX_UNIFORM_BLOCK_SIZE:
      *params = capabilities_.max_uniform_block_size;
      return true;
    case GL_TIMESTAMP_EXT:
      // We convert all GPU timestamps to CPU time.
      *params = (base::TimeTicks::Now() - base::TimeTicks()).InMicroseconds() *
                base::Time::kNanosecondsPerMicrosecond;
      return true;
    default:
      break;
  }
  // Not a dedicated 64-bit value; try the shared 32-bit cache and widen.
  GLint value;
  if (!GetHelper(pname, &value)) {
    return false;
  }
  *params = static_cast<GLint64>(value);
  return true;
}
// glGetIntegerv shares the client-side cache implemented by GetHelper().
bool GLES2Implementation::GetIntegervHelper(GLenum pname, GLint* params) {
  return GetHelper(pname, params);
}
// Indexed integer queries are not cached client side; returning false makes
// the caller issue the query to the service.
bool GLES2Implementation::GetIntegeri_vHelper(GLenum pname,
                                              GLuint index,
                                              GLint* data) {
  // TODO(zmo): Implement client side caching.
  return false;
}
// Indexed 64-bit queries are not cached client side; returning false makes
// the caller issue the query to the service.
bool GLES2Implementation::GetInteger64i_vHelper(GLenum pname,
                                                GLuint index,
                                                GLint64* data) {
  // TODO(zmo): Implement client side caching.
  return false;
}
// glGetInternalformativ results are not cached client side; returning false
// makes the caller issue the query to the service.
bool GLES2Implementation::GetInternalformativHelper(GLenum target,
                                                    GLenum format,
                                                    GLenum pname,
                                                    GLsizei bufSize,
                                                    GLint* params) {
  // TODO(zmo): Implement the client side caching.
  return false;
}
// Answers glGetSynciv queries whose results are compile-time constants for
// fence syncs (object type, condition, flags) without a service round trip.
// Returns false for anything else (e.g. GL_SYNC_STATUS), which must be
// queried from the service.
bool GLES2Implementation::GetSyncivHelper(GLsync sync,
                                          GLenum pname,
                                          GLsizei bufsize,
                                          GLsizei* length,
                                          GLint* values) {
  GLint result;
  if (pname == GL_OBJECT_TYPE) {
    result = GL_SYNC_FENCE;
  } else if (pname == GL_SYNC_CONDITION) {
    result = GL_SYNC_GPU_COMMANDS_COMPLETE;
  } else if (pname == GL_SYNC_FLAGS) {
    result = 0;
  } else {
    return false;
  }
  // Only write the value when the caller provided room for it.
  if (bufsize > 0) {
    DCHECK(values);
    *values = result;
  }
  // All of the constant properties are single-valued.
  if (length) {
    *length = 1;
  }
  return true;
}
// Shared implementation for the glGetQueryObject*vEXT entry points. Sets a
// GL error and returns false when |id| is unknown, still active, or never
// begun. For GL_QUERY_RESULT_EXT it blocks until the result is available,
// escalating from a token wait to a full Finish.
bool GLES2Implementation::GetQueryObjectValueHelper(const char* function_name,
                                                    GLuint id,
                                                    GLenum pname,
                                                    GLuint64* params) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] GetQueryObjectValueHelper(" << id
                     << ", " << GLES2Util::GetStringQueryObjectParameter(pname)
                     << ", " << static_cast<const void*>(params) << ")");
  QueryTracker::Query* query = query_tracker_->GetQuery(id);
  if (!query) {
    SetGLError(GL_INVALID_OPERATION, function_name, "unknown query id");
    return false;
  }
  if (query->Active()) {
    SetGLError(GL_INVALID_OPERATION, function_name,
               "query active. Did you call glEndQueryEXT?");
    return false;
  }
  if (query->NeverUsed()) {
    SetGLError(GL_INVALID_OPERATION, function_name,
               "Never used. Did you call glBeginQueryEXT?");
    return false;
  }
  bool valid_value = false;
  // The NO_FLUSH variant must not trigger an implicit flush while polling.
  const bool flush_if_pending =
      pname != GL_QUERY_RESULT_AVAILABLE_NO_FLUSH_CHROMIUM_EXT;
  switch (pname) {
    case GL_QUERY_RESULT_EXT:
      if (!query->CheckResultsAvailable(helper_, flush_if_pending)) {
        // First wait for commands up to the query's token to be processed;
        // if the result still isn't in, force a full round-trip Finish,
        // after which it must be available.
        helper_->WaitForToken(query->token());
        if (!query->CheckResultsAvailable(helper_, flush_if_pending)) {
          FinishHelper();
          CHECK(query->CheckResultsAvailable(helper_, flush_if_pending));
        }
      }
      *params = query->GetResult();
      valid_value = true;
      break;
    case GL_QUERY_RESULT_AVAILABLE_EXT:
      *params = query->CheckResultsAvailable(helper_, flush_if_pending);
      valid_value = true;
      break;
    case GL_QUERY_RESULT_AVAILABLE_NO_FLUSH_CHROMIUM_EXT:
      *params = query->CheckResultsAvailable(helper_, flush_if_pending);
      valid_value = true;
      break;
    default:
      SetGLErrorInvalidEnum(function_name, pname, "pname");
      break;
  }
  GPU_CLIENT_LOG("  " << *params);
  CheckGLError();
  return valid_value;
}
// Synchronous round trip: asks the service for the maximum index value in
// the given element buffer range via the shared result memory. Returns 0 if
// the result buffer can't be allocated.
GLuint GLES2Implementation::GetMaxValueInBufferCHROMIUMHelper(GLuint buffer_id,
                                                              GLsizei count,
                                                              GLenum type,
                                                              GLuint offset) {
  typedef cmds::GetMaxValueInBufferCHROMIUM::Result Result;
  auto result = GetResultAs<Result>();
  if (!result) {
    return 0;
  }
  // Pre-clear so a service that fails to write leaves a well-defined 0.
  *result = 0;
  helper_->GetMaxValueInBufferCHROMIUM(buffer_id, count, type, offset,
                                       GetResultShmId(), result.offset());
  WaitForCmd();
  return *result;
}
// Public entry point for glGetMaxValueInBufferCHROMIUM; logs and delegates
// to the blocking helper above.
GLuint GLES2Implementation::GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
                                                        GLsizei count,
                                                        GLenum type,
                                                        GLuint offset) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetMaxValueInBufferCHROMIUM("
                     << buffer_id << ", " << count << ", "
                     << GLES2Util::GetStringGetMaxIndexType(type) << ", "
                     << offset << ")");
  GLuint result =
      GetMaxValueInBufferCHROMIUMHelper(buffer_id, count, type, offset);
  GPU_CLIENT_LOG("returned " << result);
  CheckGLError();
  return result;
}
// Undoes the temporary buffer bindings installed for simulated client-side
// arrays. No-op when |restore| is false (nothing was simulated).
void GLES2Implementation::RestoreElementAndArrayBuffers(bool restore) {
  if (!restore)
    return;
  RestoreArrayBuffer(restore);
  // The element array binding only needs restoring when the user had no
  // buffer bound (binding 0, i.e. a client-side index array was simulated).
  if (vertex_array_object_manager_->bound_element_array_buffer() == 0)
    helper_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
// Re-establishes the user's GL_ARRAY_BUFFER binding on the service side
// after a simulated client-side array draw. No-op when |restore| is false.
void GLES2Implementation::RestoreArrayBuffer(bool restore) {
  if (!restore)
    return;
  helper_->BindBuffer(GL_ARRAY_BUFFER, bound_array_buffer_);
}
// glDrawElements entry point; logging plus delegation to the shared
// indexed-draw implementation.
void GLES2Implementation::DrawElements(GLenum mode,
                                       GLsizei count,
                                       GLenum type,
                                       const void* indices) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawElements("
                     << GLES2Util::GetStringDrawMode(mode) << ", " << count
                     << ", " << GLES2Util::GetStringIndexType(type) << ", "
                     << static_cast<const void*>(indices) << ")");
  DrawElementsImpl(mode, count, type, indices, "glDrawElements");
}
// glDrawRangeElements entry point. Validates the [start, end] range client
// side, then shares the glDrawElements implementation (the range hint is
// not forwarded).
void GLES2Implementation::DrawRangeElements(GLenum mode,
                                            GLuint start,
                                            GLuint end,
                                            GLsizei count,
                                            GLenum type,
                                            const void* indices) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawRangeElements("
                     << GLES2Util::GetStringDrawMode(mode) << ", " << start
                     << ", " << end << ", " << count << ", "
                     << GLES2Util::GetStringIndexType(type) << ", "
                     << static_cast<const void*>(indices) << ")");
  if (end < start) {
    SetGLError(GL_INVALID_VALUE, "glDrawRangeElements", "end < start");
    return;
  }
  DrawElementsImpl(mode, count, type, indices, "glDrawRangeElements");
}
// Shared implementation for indexed draws. Validates |count|, sets up
// simulated index/vertex buffers when client-side arrays are in use (which
// rewrites |offset|), issues the draw, then restores the user's bindings.
void GLES2Implementation::DrawElementsImpl(GLenum mode,
                                           GLsizei count,
                                           GLenum type,
                                           const void* indices,
                                           const char* func_name) {
  if (count < 0) {
    SetGLError(GL_INVALID_VALUE, func_name, "count < 0");
    return;
  }
  bool simulated = false;
  // With a bound element array buffer, |indices| is a byte offset into it.
  GLuint offset = ToGLuint(indices);
  if (count > 0) {
    if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
        !ValidateOffset(func_name, reinterpret_cast<GLintptr>(indices))) {
      return;
    }
    // May upload client-side index/attribute data into service buffers and
    // point |offset| at the uploaded copy; |simulated| records whether the
    // bindings must be restored afterwards.
    if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
            func_name, this, helper_, count, type, 0, indices, &offset,
            &simulated)) {
      return;
    }
  }
  helper_->DrawElements(mode, count, type, offset);
  RestoreElementAndArrayBuffers(simulated);
  CheckGLError();
}
// glFlush: queues a service-side GL flush, then flushes the command buffer
// itself so the service sees it.
void GLES2Implementation::Flush() {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFlush()");
  flush_id_ = GenerateNextFlushId();
  // Insert the cmd to call glFlush
  helper_->Flush();
  FlushHelper();
}
// Flushes the command buffer without inserting a service-side GL flush
// command ("shallow" flush).
void GLES2Implementation::IssueShallowFlush() {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShallowFlushCHROMIUM()");
  flush_id_ = GenerateNextFlushId();
  FlushHelper();
}
// Public alias for IssueShallowFlush().
void GLES2Implementation::ShallowFlushCHROMIUM() {
  IssueShallowFlush();
}
// Pushes all pending commands to the service (tells it to execute up to the
// latest command) without inserting a GL-level flush. Optionally releases
// cached resources when configured to free aggressively.
void GLES2Implementation::FlushHelper() {
  helper_->CommandBufferHelper::Flush();
  if (aggressively_free_resources_) {
    FreeEverything();
  }
}
// Establishes ordering with other command buffers on the same channel.
void GLES2Implementation::OrderingBarrierCHROMIUM() {
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glOrderingBarrierCHROMIUM");
  // Flush command buffer at the GPU channel level.  May be implemented as
  // Flush().
  helper_->CommandBufferHelper::OrderingBarrier();
}
// glFinish: records a new flush id, then blocks until the service has
// executed everything (see FinishHelper).
void GLES2Implementation::Finish() {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  flush_id_ = GenerateNextFlushId();
  FinishHelper();
}
// Blocks until the service has executed all queued commands, but without
// inserting a GL-level glFinish (command-buffer-only finish).
void GLES2Implementation::ShallowFinishCHROMIUM() {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  TRACE_EVENT0("gpu", "GLES2::ShallowFinishCHROMIUM");
  flush_id_ = GenerateNextFlushId();
  // Flush our command buffer (tell the service to execute up to the flush cmd
  // and don't return until it completes).
  helper_->CommandBufferHelper::Finish();
  if (aggressively_free_resources_)
    FreeEverything();
}
// Queues a service-side glFinish and then blocks until the command buffer
// has executed it, guaranteeing all prior GL work is complete.
void GLES2Implementation::FinishHelper() {
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFinish()");
  TRACE_EVENT0("gpu", "GLES2::Finish");
  // Insert the cmd to call glFinish
  helper_->Finish();
  // Finish our command buffer
  // (tell the service to execute up to the Finish cmd and wait for it to
  // execute.)
  helper_->CommandBufferHelper::Finish();
  if (aggressively_free_resources_)
    FreeEverything();
}
// Returns the id recorded by the most recent flush/finish on this context.
GLuint GLES2Implementation::GetLastFlushIdCHROMIUM() {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetLastFlushIdCHROMIUM()");
  return flush_id_;
}
// Issues a swap and applies client-side flow control: a token is inserted
// per swap and we block once too many swaps are in flight.
void GLES2Implementation::SwapBuffers(uint64_t swap_id, GLbitfield flags) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSwapBuffers()");
  // TODO(piman): Strictly speaking we'd want to insert the token after the
  // swap, but the state update with the updated token might not have happened
  // by the time the SwapBuffer callback gets called, forcing us to synchronize
  // with the GPU process more than needed. So instead, make it happen before.
  // All it means is that we could be slightly looser on the kMaxSwapBuffers
  // semantics if the client doesn't use the callback mechanism, and by chance
  // the scheduler yields between the InsertToken and the SwapBuffers.
  swap_buffers_tokens_.push(helper_->InsertToken());
  helper_->SwapBuffers(swap_id, flags);
  helper_->CommandBufferHelper::Flush();
  // Wait if we added too many swap buffers. Add 1 to kMaxSwapBuffers to
  // compensate for TODO above.
  if (swap_buffers_tokens_.size() > kMaxSwapBuffers + 1) {
    helper_->WaitForToken(swap_buffers_tokens_.front());
    swap_buffers_tokens_.pop();
  }
}
// Swap restricted to |count| damage rects (4 GLints each: x, y, w, h), with
// the same token-based flow control as SwapBuffers.
void GLES2Implementation::SwapBuffersWithBoundsCHROMIUM(uint64_t swap_id,
                                                        GLsizei count,
                                                        const GLint* rects,
                                                        GLbitfield flags) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSwapBuffersWithBoundsCHROMIUM("
                     << count << ", " << static_cast<const void*>(rects)
                     << ")");
  GPU_CLIENT_LOG_CODE_BLOCK({
    for (GLsizei i = 0; i < count; ++i) {
      GPU_CLIENT_LOG("  " << i << ": " << rects[0 + i * 4] << ", "
                          << rects[1 + i * 4] << ", " << rects[2 + i * 4]
                          << ", " << rects[3 + i * 4]);
    }
  });
  if (count < 0) {
    SetGLError(GL_INVALID_VALUE, "glSwapBuffersWithBoundsCHROMIUM",
               "count < 0");
    return;
  }
  // Same flow control as GLES2Implementation::SwapBuffers (see comments there).
  swap_buffers_tokens_.push(helper_->InsertToken());
  helper_->SwapBuffersWithBoundsCHROMIUMImmediate(swap_id, count, rects, flags);
  helper_->CommandBufferHelper::Flush();
  if (swap_buffers_tokens_.size() > kMaxSwapBuffers + 1) {
    helper_->WaitForToken(swap_buffers_tokens_.front());
    swap_buffers_tokens_.pop();
  }
}
// glBindAttribLocation: ships the attribute name through the shared result
// bucket (variable-length data), then releases the bucket.
void GLES2Implementation::BindAttribLocation(GLuint program,
                                             GLuint index,
                                             const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindAttribLocation(" << program
                     << ", " << index << ", " << name << ")");
  SetBucketAsString(kResultBucketId, name);
  helper_->BindAttribLocationBucket(program, index, kResultBucketId);
  helper_->SetBucketSize(kResultBucketId, 0);
  CheckGLError();
}
// glBindFragDataLocationEXT: ships the output variable name through the
// shared result bucket, then releases the bucket.
void GLES2Implementation::BindFragDataLocationEXT(GLuint program,
                                                  GLuint colorName,
                                                  const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindFragDataLocationEXT("
                     << program << ", " << colorName << ", " << name << ")");
  SetBucketAsString(kResultBucketId, name);
  helper_->BindFragDataLocationEXTBucket(program, colorName, kResultBucketId);
  helper_->SetBucketSize(kResultBucketId, 0);
  CheckGLError();
}
// glBindFragDataLocationIndexedEXT (EXT_blend_func_extended): binds a
// fragment output variable to a color number and index. The name travels
// through the shared result bucket, which is released afterwards.
void GLES2Implementation::BindFragDataLocationIndexedEXT(GLuint program,
                                                         GLuint colorName,
                                                         GLuint index,
                                                         const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  // Fixed: the log label previously said "glBindFragDataLocationEXT"
  // (copy-pasted from the non-indexed variant), misattributing these calls
  // in GPU client traces.
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindFragDataLocationIndexedEXT("
                     << program << ", " << colorName << ", " << index << ", "
                     << name << ")");
  SetBucketAsString(kResultBucketId, name);
  helper_->BindFragDataLocationIndexedEXTBucket(program, colorName, index,
                                                kResultBucketId);
  helper_->SetBucketSize(kResultBucketId, 0);
  CheckGLError();
}
// glBindUniformLocationCHROMIUM: ships the uniform name through the shared
// result bucket, then releases the bucket.
void GLES2Implementation::BindUniformLocationCHROMIUM(GLuint program,
                                                      GLint location,
                                                      const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindUniformLocationCHROMIUM("
                     << program << ", " << location << ", " << name << ")");
  SetBucketAsString(kResultBucketId, name);
  helper_->BindUniformLocationCHROMIUMBucket(program, location,
                                             kResultBucketId);
  helper_->SetBucketSize(kResultBucketId, 0);
  CheckGLError();
}
// glGetVertexAttribPointerv: answered from the client-side vertex array
// object manager when possible; otherwise a synchronous round trip through
// the shared result memory.
void GLES2Implementation::GetVertexAttribPointerv(GLuint index,
                                                  GLenum pname,
                                                  void** ptr) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribPointer(" << index
                     << ", " << GLES2Util::GetStringVertexPointer(pname) << ", "
                     << static_cast<void*>(ptr) << ")");
  GPU_CLIENT_LOG_CODE_BLOCK(int32_t num_results = 1);
  if (!vertex_array_object_manager_->GetAttribPointer(index, pname, ptr)) {
    TRACE_EVENT0("gpu", "GLES2::GetVertexAttribPointerv");
    typedef cmds::GetVertexAttribPointerv::Result Result;
    auto result = GetResultAs<Result>();
    if (!result) {
      return;
    }
    // Zero the count first so a non-responding service yields no results.
    result->SetNumResults(0);
    helper_->GetVertexAttribPointerv(index, pname, GetResultShmId(),
                                     result.offset());
    WaitForCmd();
    result->CopyResult(ptr);
    GPU_CLIENT_LOG_CODE_BLOCK(num_results = result->GetNumResults());
  }
  GPU_CLIENT_LOG_CODE_BLOCK({
    for (int32_t i = 0; i < num_results; ++i) {
      GPU_CLIENT_LOG("  " << i << ": " << ptr[i]);
    }
  });
  CheckGLError();
}
// Frees a program id via the shared id handler (which invokes
// DeleteProgramStub for the service-side delete). Clears the cached current
// program if it was the one deleted. Returns false with a GL error when the
// id doesn't belong to this context.
bool GLES2Implementation::DeleteProgramHelper(GLuint program) {
  if (!GetIdHandler(SharedIdNamespaces::kProgramsAndShaders)
           ->FreeIds(this, 1, &program,
                     &GLES2Implementation::DeleteProgramStub)) {
    SetGLError(GL_INVALID_VALUE, "glDeleteProgram",
               "id not created by this context.");
    return false;
  }
  if (program == current_program_) {
    current_program_ = 0;
  }
  return true;
}
// Id-handler callback: drops the cached program info and issues the
// service-side delete for a single program id.
void GLES2Implementation::DeleteProgramStub(GLsizei n, const GLuint* programs) {
  DCHECK_EQ(1, n);
  const GLuint program = programs[0];
  share_group_->program_info_manager()->DeleteInfo(program);
  helper_->DeleteProgram(program);
}
// Frees a shader id via the shared id handler (which invokes
// DeleteShaderStub for the service-side delete). Returns false with a GL
// error when the id doesn't belong to this context.
bool GLES2Implementation::DeleteShaderHelper(GLuint shader) {
  const bool freed =
      GetIdHandler(SharedIdNamespaces::kProgramsAndShaders)
          ->FreeIds(this, 1, &shader, &GLES2Implementation::DeleteShaderStub);
  if (!freed) {
    SetGLError(GL_INVALID_VALUE, "glDeleteShader",
               "id not created by this context.");
  }
  return freed;
}
// Id-handler callback: drops the cached shader info and issues the
// service-side delete for a single shader id.
void GLES2Implementation::DeleteShaderStub(GLsizei n, const GLuint* shaders) {
  DCHECK_EQ(1, n);
  const GLuint shader = shaders[0];
  share_group_->program_info_manager()->DeleteInfo(shader);
  helper_->DeleteShader(shader);
}
// Frees a sync object's client id via the shared id handler (which invokes
// DeleteSyncStub for the service-side delete). Sets a GL error when the id
// doesn't belong to this context.
void GLES2Implementation::DeleteSyncHelper(GLsync sync) {
  GLuint sync_id = ToGLuint(sync);
  const bool freed =
      GetIdHandler(SharedIdNamespaces::kSyncs)
          ->FreeIds(this, 1, &sync_id, &GLES2Implementation::DeleteSyncStub);
  if (!freed) {
    SetGLError(GL_INVALID_VALUE, "glDeleteSync",
               "id not created by this context.");
  }
}
// Id-handler callback: issues the service-side delete for one sync id.
void GLES2Implementation::DeleteSyncStub(GLsizei n, const GLuint* syncs) {
  DCHECK_EQ(1, n);
  helper_->DeleteSync(syncs[0]);
}
// Synchronous service query for an attribute location. The name is passed
// in the shared bucket and the location comes back through shared result
// memory. Returns -1 on transfer-buffer allocation failure or when the
// attribute is unknown.
GLint GLES2Implementation::GetAttribLocationHelper(GLuint program,
                                                   const char* name) {
  typedef cmds::GetAttribLocation::Result Result;
  SetBucketAsCString(kResultBucketId, name);
  auto result = GetResultAs<Result>();
  if (!result) {
    return -1;
  }
  // Pre-set the "not found" answer in case the service doesn't write one.
  *result = -1;
  helper_->GetAttribLocation(program, kResultBucketId, GetResultShmId(),
                             result.offset());
  WaitForCmd();
  helper_->SetBucketSize(kResultBucketId, 0);
  return *result;
}
// glGetAttribLocation: answered by the shared program info manager, which
// may use its cache or fall back to GetAttribLocationHelper.
GLint GLES2Implementation::GetAttribLocation(GLuint program, const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetAttribLocation(" << program
                     << ", " << name << ")");
  TRACE_EVENT0("gpu", "GLES2::GetAttribLocation");
  GLint loc = share_group_->program_info_manager()->GetAttribLocation(
      this, program, name);
  GPU_CLIENT_LOG("returned " << loc);
  CheckGLError();
  return loc;
}
// Synchronous service query for a uniform location; same bucket/result-shm
// pattern as GetAttribLocationHelper. Returns -1 on failure or not found.
GLint GLES2Implementation::GetUniformLocationHelper(GLuint program,
                                                    const char* name) {
  typedef cmds::GetUniformLocation::Result Result;
  SetBucketAsCString(kResultBucketId, name);
  auto result = GetResultAs<Result>();
  if (!result) {
    return -1;
  }
  // Pre-set the "not found" answer in case the service doesn't write one.
  *result = -1;
  helper_->GetUniformLocation(program, kResultBucketId, GetResultShmId(),
                              result.offset());
  WaitForCmd();
  helper_->SetBucketSize(kResultBucketId, 0);
  return *result;
}
// glGetUniformLocation: answered by the shared program info manager, which
// may use its cache or fall back to GetUniformLocationHelper.
GLint GLES2Implementation::GetUniformLocation(GLuint program,
                                              const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformLocation(" << program
                     << ", " << name << ")");
  TRACE_EVENT0("gpu", "GLES2::GetUniformLocation");
  GLint loc = share_group_->program_info_manager()->GetUniformLocation(
      this, program, name);
  GPU_CLIENT_LOG("returned " << loc);
  CheckGLError();
  return loc;
}
// Synchronous service query for glGetUniformIndices: packs all names into
// the shared bucket, waits for the results in shared memory, and copies
// them out. Returns false on packing/allocation failure or if the service
// returned a different number of results than requested.
bool GLES2Implementation::GetUniformIndicesHelper(GLuint program,
                                                  GLsizei count,
                                                  const char* const* names,
                                                  GLuint* indices) {
  if (!PackStringsToBucket(count, names, nullptr, "glGetUniformIndices")) {
    return false;
  }
  typedef cmds::GetUniformIndices::Result Result;
  auto result = GetResultAs<Result>();
  if (!result) {
    return false;
  }
  // Zero the count first so a non-responding service yields no results.
  result->SetNumResults(0);
  helper_->GetUniformIndices(program, kResultBucketId, GetResultShmId(),
                             result.offset());
  WaitForCmd();
  if (result->GetNumResults() != count) {
    return false;
  }
  result->CopyResult(indices);
  return true;
}
// glGetUniformIndices entry point: validates |count|, then delegates to the
// shared program info manager (which may call GetUniformIndicesHelper).
void GLES2Implementation::GetUniformIndices(GLuint program,
                                            GLsizei count,
                                            const char* const* names,
                                            GLuint* indices) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformIndices(" << program
                     << ", " << count << ", " << names << ", " << indices
                     << ")");
  TRACE_EVENT0("gpu", "GLES2::GetUniformIndices");
  if (count < 0) {
    SetGLError(GL_INVALID_VALUE, "glGetUniformIndices", "count < 0");
    return;
  }
  if (count == 0) {
    return;
  }
  bool success = share_group_->program_info_manager()->GetUniformIndices(
      this, program, count, names, indices);
  if (success) {
    GPU_CLIENT_LOG_CODE_BLOCK({
      for (GLsizei ii = 0; ii < count; ++ii) {
        GPU_CLIENT_LOG("  " << ii << ": " << indices[ii]);
      }
    });
  }
  CheckGLError();
}
// Answers glGetProgramiv from the shared program info manager's cache when
// possible; returns false when the caller must query the service.
bool GLES2Implementation::GetProgramivHelper(GLuint program,
                                             GLenum pname,
                                             GLint* params) {
  bool got_value = share_group_->program_info_manager()->GetProgramiv(
      this, program, pname, params);
  GPU_CLIENT_LOG_CODE_BLOCK({
    if (got_value) {
      GPU_CLIENT_LOG("  0: " << *params);
    }
  });
  return got_value;
}
// Synchronous service query for glGetFragDataIndexEXT; same bucket/
// result-shm pattern as the other location helpers. Returns -1 on failure
// or not found.
GLint GLES2Implementation::GetFragDataIndexEXTHelper(GLuint program,
                                                     const char* name) {
  typedef cmds::GetFragDataIndexEXT::Result Result;
  SetBucketAsCString(kResultBucketId, name);
  auto result = GetResultAs<Result>();
  if (!result) {
    return -1;
  }
  // Pre-set the "not found" answer in case the service doesn't write one.
  *result = -1;
  helper_->GetFragDataIndexEXT(program, kResultBucketId, GetResultShmId(),
                               result.offset());
  WaitForCmd();
  helper_->SetBucketSize(kResultBucketId, 0);
  return *result;
}
// glGetFragDataIndexEXT: answered by the shared program info manager, which
// may use its cache or fall back to GetFragDataIndexEXTHelper.
GLint GLES2Implementation::GetFragDataIndexEXT(GLuint program,
                                               const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetFragDataIndexEXT(" << program
                     << ", " << name << ")");
  TRACE_EVENT0("gpu", "GLES2::GetFragDataIndexEXT");
  GLint loc = share_group_->program_info_manager()->GetFragDataIndex(
      this, program, name);
  GPU_CLIENT_LOG("returned " << loc);
  CheckGLError();
  return loc;
}
// Synchronous service query for glGetFragDataLocation; same bucket/
// result-shm pattern as the other location helpers. Returns -1 on failure
// or not found.
GLint GLES2Implementation::GetFragDataLocationHelper(GLuint program,
                                                     const char* name) {
  typedef cmds::GetFragDataLocation::Result Result;
  SetBucketAsCString(kResultBucketId, name);
  auto result = GetResultAs<Result>();
  if (!result) {
    return -1;
  }
  // Pre-set the "not found" answer in case the service doesn't write one.
  *result = -1;
  helper_->GetFragDataLocation(program, kResultBucketId, GetResultShmId(),
                               result.offset());
  WaitForCmd();
  helper_->SetBucketSize(kResultBucketId, 0);
  return *result;
}
// glGetFragDataLocation: answered by the shared program info manager, which
// may use its cache or fall back to GetFragDataLocationHelper.
GLint GLES2Implementation::GetFragDataLocation(GLuint program,
                                               const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetFragDataLocation(" << program
                     << ", " << name << ")");
  TRACE_EVENT0("gpu", "GLES2::GetFragDataLocation");
  GLint loc = share_group_->program_info_manager()->GetFragDataLocation(
      this, program, name);
  GPU_CLIENT_LOG("returned " << loc);
  CheckGLError();
  return loc;
}
// Synchronous service query for glGetUniformBlockIndex; same bucket/
// result-shm pattern as the location helpers, but "not found" is
// GL_INVALID_INDEX rather than -1.
GLuint GLES2Implementation::GetUniformBlockIndexHelper(GLuint program,
                                                       const char* name) {
  typedef cmds::GetUniformBlockIndex::Result Result;
  SetBucketAsCString(kResultBucketId, name);
  auto result = GetResultAs<Result>();
  if (!result) {
    return GL_INVALID_INDEX;
  }
  // Pre-set the "not found" answer in case the service doesn't write one.
  *result = GL_INVALID_INDEX;
  helper_->GetUniformBlockIndex(program, kResultBucketId, GetResultShmId(),
                                result.offset());
  WaitForCmd();
  helper_->SetBucketSize(kResultBucketId, 0);
  return *result;
}
// glGetUniformBlockIndex: answered by the shared program info manager,
// which may use its cache or fall back to GetUniformBlockIndexHelper.
GLuint GLES2Implementation::GetUniformBlockIndex(GLuint program,
                                                 const char* name) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformBlockIndex(" << program
                     << ", " << name << ")");
  TRACE_EVENT0("gpu", "GLES2::GetUniformBlockIndex");
  GLuint index = share_group_->program_info_manager()->GetUniformBlockIndex(
      this, program, name);
  GPU_CLIENT_LOG("returned " << index);
  CheckGLError();
  return index;
}
// glLinkProgram: issues the link and registers the program with the shared
// info manager so post-link introspection data can be cached.
void GLES2Implementation::LinkProgram(GLuint program) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLinkProgram(" << program << ")");
  helper_->LinkProgram(program);
  share_group_->program_info_manager()->CreateInfo(program);
  CheckGLError();
}
// glShaderBinary: validates sizes, copies the shader-id array followed by
// the binary blob into a single transfer-buffer allocation, and issues one
// command referencing both regions.
void GLES2Implementation::ShaderBinary(GLsizei n,
                                       const GLuint* shaders,
                                       GLenum binaryformat,
                                       const void* binary,
                                       GLsizei length) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShaderBinary(" << n << ", "
                     << static_cast<const void*>(shaders) << ", "
                     << GLES2Util::GetStringEnum(binaryformat) << ", "
                     << static_cast<const void*>(binary) << ", " << length
                     << ")");
  if (n < 0) {
    SetGLError(GL_INVALID_VALUE, "glShaderBinary", "n < 0.");
    return;
  }
  if (length < 0) {
    SetGLError(GL_INVALID_VALUE, "glShaderBinary", "length < 0.");
    return;
  }
  // TODO(gman): ShaderBinary should use buckets.
  // NOTE(review): shader_id_size is in bytes, yet it is passed as the
  // ScopedTransferBufferArray<GLint> element count, compared against
  // num_elements(), and added to elements() (GLint pointer arithmetic) and
  // to the byte offset below. Whether these units line up depends on the
  // ScopedTransferBufferArray contract — confirm against its definition.
  unsigned int shader_id_size = n * sizeof(*shaders);
  ScopedTransferBufferArray<GLint> buffer(shader_id_size + length, helper_,
                                          transfer_buffer_);
  if (!buffer.valid() || buffer.num_elements() != shader_id_size + length) {
    SetGLError(GL_OUT_OF_MEMORY, "glShaderBinary", "out of memory.");
    return;
  }
  void* shader_ids = buffer.elements();
  void* shader_data = buffer.elements() + shader_id_size;
  memcpy(shader_ids, shaders, shader_id_size);
  memcpy(shader_data, binary, length);
  // One command, two regions of the same allocation: ids first, then data.
  helper_->ShaderBinary(n, buffer.shm_id(), buffer.offset(), binaryformat,
                        buffer.shm_id(), buffer.offset() + shader_id_size,
                        length);
  CheckGLError();
}
// glPixelStorei: validates the parameter, caches it client side (the cached
// values feed client-side image size computation), and forwards it to the
// service — except for SKIP parameters, which are applied purely on the
// client by adjusting upload/readback pointers.
void GLES2Implementation::PixelStorei(GLenum pname, GLint param) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPixelStorei("
                     << GLES2Util::GetStringPixelStore(pname) << ", " << param
                     << ")");
  // We have to validate before caching these parameters because we use them
  // to compute image sizes on the client side.
  switch (pname) {
    case GL_PACK_ALIGNMENT:
    case GL_UNPACK_ALIGNMENT:
      if (param != 1 && param != 2 && param != 4 && param != 8) {
        SetGLError(GL_INVALID_VALUE, "glPixelStorei", "invalid param");
        return;
      }
      break;
    case GL_PACK_ROW_LENGTH:
    case GL_PACK_SKIP_PIXELS:
    case GL_PACK_SKIP_ROWS:
    case GL_UNPACK_IMAGE_HEIGHT:
    case GL_UNPACK_SKIP_IMAGES:
      // These pnames only exist from ES3 on.
      if (capabilities_.major_version < 3) {
        SetGLError(GL_INVALID_ENUM, "glPixelStorei", "invalid pname");
        return;
      }
      if (param < 0) {
        SetGLError(GL_INVALID_VALUE, "glPixelStorei", "invalid param");
        return;
      }
      break;
    case GL_UNPACK_ROW_LENGTH:
    case GL_UNPACK_SKIP_ROWS:
    case GL_UNPACK_SKIP_PIXELS:
      // These parameters are always enabled in ES2 by EXT_unpack_subimage.
      if (param < 0) {
        SetGLError(GL_INVALID_VALUE, "glPixelStorei", "invalid param");
        return;
      }
      break;
    default:
      SetGLError(GL_INVALID_ENUM, "glPixelStorei", "invalid pname");
      return;
  }
  // Do not send SKIP parameters to the service side.
  // Handle them on the client side.
  switch (pname) {
    case GL_PACK_ALIGNMENT:
      pack_alignment_ = param;
      break;
    case GL_PACK_ROW_LENGTH:
      pack_row_length_ = param;
      break;
    case GL_PACK_SKIP_PIXELS:
      pack_skip_pixels_ = param;
      return;
    case GL_PACK_SKIP_ROWS:
      pack_skip_rows_ = param;
      return;
    case GL_UNPACK_ALIGNMENT:
      unpack_alignment_ = param;
      break;
    case GL_UNPACK_ROW_LENGTH:
      unpack_row_length_ = param;
      if (capabilities_.major_version < 3) {
        // In ES2 with EXT_unpack_subimage, it's handled on the client side
        // and there is no need to send it to the service side.
        return;
      }
      break;
    case GL_UNPACK_IMAGE_HEIGHT:
      unpack_image_height_ = param;
      break;
    case GL_UNPACK_SKIP_ROWS:
      unpack_skip_rows_ = param;
      return;
    case GL_UNPACK_SKIP_PIXELS:
      unpack_skip_pixels_ = param;
      return;
    case GL_UNPACK_SKIP_IMAGES:
      unpack_skip_images_ = param;
      return;
    default:
      // First switch already rejected every other pname.
      NOTREACHED();
      break;
  }
  helper_->PixelStorei(pname, param);
  CheckGLError();
}
// glVertexAttribIPointer (integer attributes): records the pointer client
// side, then forwards it to the service only when it refers to a real
// buffer binding (client-side arrays are resolved at draw time).
void GLES2Implementation::VertexAttribIPointer(GLuint index,
                                               GLint size,
                                               GLenum type,
                                               GLsizei stride,
                                               const void* ptr) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribIPointer(" << index
                     << ", " << size << ", "
                     << GLES2Util::GetStringVertexAttribIType(type) << ", "
                     << stride << ", " << ptr << ")");
  // Record the info on the client side.
  if (!vertex_array_object_manager_->SetAttribPointer(
          bound_array_buffer_, index, size, type, GL_FALSE, stride, ptr,
          GL_TRUE)) {
    SetGLError(GL_INVALID_OPERATION, "glVertexAttribIPointer",
               "client side arrays are not allowed in vertex array objects.");
    return;
  }
  if (!support_client_side_arrays_ || bound_array_buffer_ != 0) {
    // Only report NON client side buffers to the service.
    if (!ValidateOffset("glVertexAttribIPointer",
                        reinterpret_cast<GLintptr>(ptr))) {
      return;
    }
    helper_->VertexAttribIPointer(index, size, type, stride, ToGLuint(ptr));
  }
  CheckGLError();
}
// glVertexAttribPointer: records the pointer client side, then forwards it
// to the service only when it refers to a real buffer binding (client-side
// arrays are resolved at draw time).
void GLES2Implementation::VertexAttribPointer(GLuint index,
                                              GLint size,
                                              GLenum type,
                                              GLboolean normalized,
                                              GLsizei stride,
                                              const void* ptr) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribPointer(" << index
                     << ", " << size << ", "
                     << GLES2Util::GetStringVertexAttribType(type) << ", "
                     << GLES2Util::GetStringBool(normalized) << ", " << stride
                     << ", " << ptr << ")");
  // Record the info on the client side.
  if (!vertex_array_object_manager_->SetAttribPointer(
          bound_array_buffer_, index, size, type, normalized, stride, ptr,
          GL_FALSE)) {
    SetGLError(GL_INVALID_OPERATION, "glVertexAttribPointer",
               "client side arrays are not allowed in vertex array objects.");
    return;
  }
  if (!support_client_side_arrays_ || bound_array_buffer_ != 0) {
    // Only report NON client side buffers to the service.
    if (!ValidateOffset("glVertexAttribPointer",
                        reinterpret_cast<GLintptr>(ptr))) {
      return;
    }
    helper_->VertexAttribPointer(index, size, type, normalized, stride,
                                 ToGLuint(ptr));
  }
  CheckGLError();
}
// glVertexAttribDivisorANGLE: mirrors the divisor into the client-side
// vertex array state and forwards it to the service.
void GLES2Implementation::VertexAttribDivisorANGLE(GLuint index,
                                                   GLuint divisor) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribDivisorANGLE("
                     << index << ", " << divisor << ") ");
  // Record the info on the client side.
  vertex_array_object_manager_->SetAttribDivisor(index, divisor);
  helper_->VertexAttribDivisorANGLE(index, divisor);
  CheckGLError();
}
// Core of glBufferData. Three paths: (1) pixel transfer buffers are backed
// by client-tracked shared memory and never reach the service here; (2) if
// the data fits in one transfer-buffer chunk it is sent with a single
// BufferData; (3) otherwise the buffer is created empty and filled via
// chunked BufferSubData.
void GLES2Implementation::BufferDataHelper(GLenum target,
                                           GLsizeiptr size,
                                           const void* data,
                                           GLenum usage) {
  if (!ValidateSize("glBufferData", size))
    return;

#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
  // Do not upload uninitialized data. Even if it's not a bug, it can cause a
  // bogus MSan report during a readback later. This is because MSan doesn't
  // understand shared memory and would assume we were reading back the same
  // unintialized data.
  if (data)
    __msan_check_mem_is_initialized(data, size);
#endif

  GLuint buffer_id;
  if (GetBoundPixelTransferBuffer(target, "glBufferData", &buffer_id)) {
    if (!buffer_id) {
      return;
    }

    // Replace any existing tracked allocation for this id with a fresh one.
    BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
    if (buffer)
      RemoveTransferBuffer(buffer);

    // Create new buffer.
    buffer = buffer_tracker_->CreateBuffer(buffer_id, size);
    DCHECK(buffer);
    if (buffer->address() && data)
      memcpy(buffer->address(), data, size);
    return;
  }

  if (IsReadbackUsage(usage)) {
    // Track a client-side shadow so readbacks can be validated/serviced.
    GLuint id = GetBoundBufferHelper(target);
    readback_buffer_shadow_tracker_->GetOrCreateBuffer(id, size);
  }

  RemoveMappedBufferRangeByTarget(target);

  // If there is no data just send BufferData
  if (size == 0 || !data) {
    helper_->BufferData(target, size, 0, 0, usage);
    return;
  }

  // See if we can send all at once.
  ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
  if (!buffer.valid()) {
    return;
  }

  if (buffer.size() >= static_cast<unsigned int>(size)) {
    memcpy(buffer.address(), data, size);
    helper_->BufferData(target, size, buffer.shm_id(), buffer.offset(), usage);
    return;
  }

  // Make the buffer with BufferData then send via BufferSubData
  helper_->BufferData(target, size, 0, 0, usage);
  BufferSubDataHelperImpl(target, 0, size, data, &buffer);
  CheckGLError();
}
// glBufferData entry point; logging plus delegation to BufferDataHelper.
void GLES2Implementation::BufferData(GLenum target,
                                     GLsizeiptr size,
                                     const void* data,
                                     GLenum usage) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferData("
                     << GLES2Util::GetStringBufferTarget(target) << ", " << size
                     << ", " << static_cast<const void*>(data) << ", "
                     << GLES2Util::GetStringBufferUsage(usage) << ")");
  BufferDataHelper(target, size, data, usage);
  CheckGLError();
}
// Core of glBufferSubData. Pixel transfer buffers are updated directly in
// their client-tracked shared memory (with range checking); everything
// else is chunked through a transfer buffer by BufferSubDataHelperImpl.
void GLES2Implementation::BufferSubDataHelper(GLenum target,
                                              GLintptr offset,
                                              GLsizeiptr size,
                                              const void* data) {
  if (size == 0) {
    return;
  }

  if (!ValidateSize("glBufferSubData", size) ||
      !ValidateOffset("glBufferSubData", offset)) {
    return;
  }

  GLuint buffer_id;
  if (GetBoundPixelTransferBuffer(target, "glBufferSubData", &buffer_id)) {
    if (!buffer_id) {
      return;
    }
    BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
    if (!buffer) {
      SetGLError(GL_INVALID_VALUE, "glBufferSubData", "unknown buffer");
      return;
    }

    // Overflow-safe bounds check of [offset, offset + size) vs buffer size.
    int32_t end = 0;
    int32_t buffer_size = buffer->size();
    if (!SafeAddInt32(offset, size, &end) || end > buffer_size) {
      SetGLError(GL_INVALID_VALUE, "glBufferSubData", "out of range");
      return;
    }

    if (buffer->address() && data)
      memcpy(static_cast<uint8_t*>(buffer->address()) + offset, data, size);
    return;
  }

  ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
  BufferSubDataHelperImpl(target, offset, size, data, &buffer);
}
// Streams |size| bytes of |data| to the service in transfer-buffer-sized
// chunks, issuing one BufferSubData per chunk at the appropriate offset and
// invalidating any client-side readback shadow for the target buffer.
void GLES2Implementation::BufferSubDataHelperImpl(
    GLenum target,
    GLintptr offset,
    GLsizeiptr size,
    const void* data,
    ScopedTransferBufferPtr* buffer) {
  DCHECK(buffer);
  DCHECK_GT(size, 0);

  // Invoked once per chunk; |copy_offset| is the running byte offset into
  // |data| and the current chunk's bytes are already in |buffer|.
  auto DoBufferSubData = [&](const std::array<uint32_t, 1>&,
                             uint32_t copy_offset, uint32_t) {
    helper_->BufferSubData(target, offset + copy_offset, buffer->size(),
                           buffer->shm_id(), buffer->offset());
    InvalidateReadbackBufferShadowDataCHROMIUM(GetBoundBufferHelper(target));
  };

  if (!TransferArraysAndExecute(size, buffer, DoBufferSubData,
                                static_cast<const int8_t*>(data))) {
    SetGLError(GL_OUT_OF_MEMORY, "glBufferSubData", "out of memory");
  }
}
// Entry point for glBufferSubData. Logs the call and delegates validation
// and the data transfer to BufferSubDataHelper.
void GLES2Implementation::BufferSubData(GLenum target,
                                        GLintptr offset,
                                        GLsizeiptr size,
                                        const void* data) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferSubData("
                     << GLES2Util::GetStringBufferTarget(target) << ", "
                     << offset << ", " << size << ", "
                     << static_cast<const void*>(data) << ")");
  BufferSubDataHelper(target, offset, size, data);
  CheckGLError();
}
// Copies the |firsts| and |counts| arrays into the transfer buffer and
// issues MultiDrawArraysWEBGL commands. TransferArraysAndExecute may split
// the request into several chunks when the arrays do not fit at once.
void GLES2Implementation::MultiDrawArraysWEBGLHelper(GLenum mode,
                                                     const GLint* firsts,
                                                     const GLsizei* counts,
                                                     GLsizei drawcount) {
  DCHECK_GT(drawcount, 0);

  uint32_t staging_size = ComputeCombinedCopySize(drawcount, firsts, counts);
  ScopedTransferBufferPtr staging(staging_size, helper_, transfer_buffer_);
  // TODO(crbug.com/890539): Increment a base gl_DrawID for multiple calls to
  // this helper
  auto issue_draw = [&](const std::array<uint32_t, 2>& array_offsets, uint32_t,
                        uint32_t chunk_drawcount) {
    helper_->MultiDrawArraysWEBGL(
        mode, staging.shm_id(), staging.offset() + array_offsets[0],
        staging.shm_id(), staging.offset() + array_offsets[1],
        chunk_drawcount);
  };
  if (!TransferArraysAndExecute(drawcount, &staging, issue_draw, firsts,
                                counts)) {
    SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawArraysWEBGL", "out of memory");
  }
}
// Copies the |firsts|, |counts| and |instance_counts| arrays into the
// transfer buffer and issues MultiDrawArraysInstancedWEBGL commands,
// chunking the transfer if the arrays do not all fit at once.
void GLES2Implementation::MultiDrawArraysInstancedWEBGLHelper(
    GLenum mode,
    const GLint* firsts,
    const GLsizei* counts,
    const GLsizei* instance_counts,
    GLsizei drawcount) {
  DCHECK_GT(drawcount, 0);

  uint32_t staging_size =
      ComputeCombinedCopySize(drawcount, firsts, counts, instance_counts);
  ScopedTransferBufferPtr staging(staging_size, helper_, transfer_buffer_);
  // TODO(crbug.com/890539): Increment a base gl_DrawID for multiple calls to
  // this helper
  auto issue_draw = [&](const std::array<uint32_t, 3>& array_offsets, uint32_t,
                        uint32_t chunk_drawcount) {
    helper_->MultiDrawArraysInstancedWEBGL(
        mode, staging.shm_id(), staging.offset() + array_offsets[0],
        staging.shm_id(), staging.offset() + array_offsets[1],
        staging.shm_id(), staging.offset() + array_offsets[2],
        chunk_drawcount);
  };
  if (!TransferArraysAndExecute(drawcount, &staging, issue_draw, firsts,
                                counts, instance_counts)) {
    SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawArraysInstancedWEBGL",
               "out of memory");
  }
}
// Copies the |counts| and |offsets| arrays into the transfer buffer and
// issues MultiDrawElementsWEBGL commands, chunking the transfer if the
// arrays do not all fit at once. (The lambda parameter is named
// |array_offsets| to avoid shadowing the |offsets| argument.)
void GLES2Implementation::MultiDrawElementsWEBGLHelper(GLenum mode,
                                                       const GLsizei* counts,
                                                       GLenum type,
                                                       const GLsizei* offsets,
                                                       GLsizei drawcount) {
  DCHECK_GT(drawcount, 0);

  uint32_t staging_size = ComputeCombinedCopySize(drawcount, counts, offsets);
  ScopedTransferBufferPtr staging(staging_size, helper_, transfer_buffer_);
  // TODO(crbug.com/890539): Increment a base gl_DrawID for multiple calls to
  // this helper
  auto issue_draw = [&](const std::array<uint32_t, 2>& array_offsets, uint32_t,
                        uint32_t chunk_drawcount) {
    helper_->MultiDrawElementsWEBGL(
        mode, staging.shm_id(), staging.offset() + array_offsets[0], type,
        staging.shm_id(), staging.offset() + array_offsets[1],
        chunk_drawcount);
  };
  if (!TransferArraysAndExecute(drawcount, &staging, issue_draw, counts,
                                offsets)) {
    SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawElementsWEBGL", "out of memory");
  }
}
// Copies the |counts|, |offsets| and |instance_counts| arrays into the
// transfer buffer and issues MultiDrawElementsInstancedWEBGL commands,
// chunking the transfer if the arrays do not all fit at once.
void GLES2Implementation::MultiDrawElementsInstancedWEBGLHelper(
    GLenum mode,
    const GLsizei* counts,
    GLenum type,
    const GLsizei* offsets,
    const GLsizei* instance_counts,
    GLsizei drawcount) {
  DCHECK_GT(drawcount, 0);

  uint32_t staging_size =
      ComputeCombinedCopySize(drawcount, counts, offsets, instance_counts);
  ScopedTransferBufferPtr staging(staging_size, helper_, transfer_buffer_);
  // TODO(crbug.com/890539): Increment a base gl_DrawID for multiple calls to
  // this helper
  auto issue_draw = [&](const std::array<uint32_t, 3>& array_offsets, uint32_t,
                        uint32_t chunk_drawcount) {
    helper_->MultiDrawElementsInstancedWEBGL(
        mode, staging.shm_id(), staging.offset() + array_offsets[0], type,
        staging.shm_id(), staging.offset() + array_offsets[1],
        staging.shm_id(), staging.offset() + array_offsets[2],
        chunk_drawcount);
  };
  if (!TransferArraysAndExecute(drawcount, &staging, issue_draw, counts,
                                offsets, instance_counts)) {
    SetGLError(GL_OUT_OF_MEMORY, "glMultiDrawElementsInstancedWEBGL",
               "out of memory");
  }
}
// Entry point for the WEBGL_multi_draw extension's glMultiDrawArraysWEBGL.
// Validates |drawcount| and attribute bindings, then delegates to
// MultiDrawArraysWEBGLHelper which stages the arrays in the transfer buffer.
void GLES2Implementation::MultiDrawArraysWEBGL(GLenum mode,
                                               const GLint* firsts,
                                               const GLsizei* counts,
                                               GLsizei drawcount) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawArraysWEBGL("
                     << GLES2Util::GetStringDrawMode(mode) << ", " << firsts
                     << ", " << counts << ", " << drawcount << ")");
  // Negative drawcount is an error; zero is a silent no-op.
  if (drawcount < 0) {
    SetGLError(GL_INVALID_VALUE, "glMultiDrawArraysWEBGL", "drawcount < 0");
    return;
  }
  if (drawcount == 0) {
    return;
  }
  // This is for an extension for WebGL which doesn't support client side
  // arrays, so a setup that would require them is rejected.
  if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
    SetGLError(GL_INVALID_OPERATION, "glMultiDrawArraysWEBGL",
               "Missing array buffer for vertex attribute");
    return;
  }
  MultiDrawArraysWEBGLHelper(mode, firsts, counts, drawcount);
  CheckGLError();
}
// Entry point for the WEBGL_multi_draw extension's
// glMultiDrawArraysInstancedWEBGL. Validates |drawcount| and attribute
// bindings, then delegates to MultiDrawArraysInstancedWEBGLHelper.
void GLES2Implementation::MultiDrawArraysInstancedWEBGL(
    GLenum mode,
    const GLint* firsts,
    const GLsizei* counts,
    const GLsizei* instance_counts,
    GLsizei drawcount) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawArraysInstancedWEBGL("
                     << GLES2Util::GetStringDrawMode(mode) << ", " << firsts
                     << ", " << counts << ", " << instance_counts << ", "
                     << drawcount << ")");
  // Negative drawcount is an error; zero is a silent no-op.
  // Fixed: the error labels previously read "glMultiDrawArraysWEBGLInstanced",
  // which does not match this entry point's name or the labels used by the
  // sibling glMultiDrawElementsInstancedWEBGL.
  if (drawcount < 0) {
    SetGLError(GL_INVALID_VALUE, "glMultiDrawArraysInstancedWEBGL",
               "drawcount < 0");
    return;
  }
  if (drawcount == 0) {
    return;
  }
  // This is for an extension for WebGL which doesn't support client side
  // arrays, so a setup that would require them is rejected.
  if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
    SetGLError(GL_INVALID_OPERATION, "glMultiDrawArraysInstancedWEBGL",
               "Missing array buffer for vertex attribute");
    return;
  }
  MultiDrawArraysInstancedWEBGLHelper(mode, firsts, counts, instance_counts,
                                      drawcount);
  CheckGLError();
}
// Entry point for the WEBGL_multi_draw extension's glMultiDrawElementsWEBGL.
// Validates |drawcount|, the element array buffer binding, and attribute
// bindings, then delegates to MultiDrawElementsWEBGLHelper.
void GLES2Implementation::MultiDrawElementsWEBGL(GLenum mode,
                                                 const GLsizei* counts,
                                                 GLenum type,
                                                 const GLsizei* offsets,
                                                 GLsizei drawcount) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawElementsWEBGL("
                     << GLES2Util::GetStringDrawMode(mode) << ", " << counts
                     << ", " << GLES2Util::GetStringIndexType(type) << ", "
                     << offsets << ", " << drawcount << ")");
  // Negative drawcount is an error; zero is a silent no-op.
  if (drawcount < 0) {
    SetGLError(GL_INVALID_VALUE, "glMultiDrawElementsWEBGL", "drawcount < 0");
    return;
  }
  if (drawcount == 0) {
    return;
  }
  // Indexed draws need a bound element array buffer; |offsets| are offsets
  // into it, not client pointers.
  if (vertex_array_object_manager_->bound_element_array_buffer() == 0) {
    SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsWEBGL",
               "No element array buffer");
    return;
  }
  // This is for an extension for WebGL which doesn't support client side
  // arrays, so a setup that would require them is rejected.
  if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
    SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsWEBGL",
               "Missing array buffer for vertex attribute");
    return;
  }
  MultiDrawElementsWEBGLHelper(mode, counts, type, offsets, drawcount);
  CheckGLError();
}
// Entry point for the WEBGL_multi_draw extension's
// glMultiDrawElementsInstancedWEBGL. Validates |drawcount|, the element
// array buffer binding, and attribute bindings, then delegates to
// MultiDrawElementsInstancedWEBGLHelper.
void GLES2Implementation::MultiDrawElementsInstancedWEBGL(
    GLenum mode,
    const GLsizei* counts,
    GLenum type,
    const GLsizei* offsets,
    const GLsizei* instance_counts,
    GLsizei drawcount) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMultiDrawElementsInstancedWEBGL("
                     << GLES2Util::GetStringDrawMode(mode) << ", " << counts
                     << ", " << GLES2Util::GetStringIndexType(type) << ", "
                     << offsets << ", " << instance_counts << ", " << drawcount
                     << ")");
  // Negative drawcount is an error; zero is a silent no-op.
  if (drawcount < 0) {
    SetGLError(GL_INVALID_VALUE, "glMultiDrawElementsInstancedWEBGL",
               "drawcount < 0");
    return;
  }
  if (drawcount == 0) {
    return;
  }
  // Indexed draws need a bound element array buffer; |offsets| are offsets
  // into it, not client pointers.
  if (vertex_array_object_manager_->bound_element_array_buffer() == 0) {
    SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsInstancedWEBGL",
               "No element array buffer");
    return;
  }
  // This is for an extension for WebGL which doesn't support client side
  // arrays, so a setup that would require them is rejected.
  if (vertex_array_object_manager_->SupportsClientSideBuffers()) {
    SetGLError(GL_INVALID_OPERATION, "glMultiDrawElementsInstancedWEBGL",
               "Missing array buffer for vertex attribute");
    return;
  }
  MultiDrawElementsInstancedWEBGLHelper(mode, counts, type, offsets,
                                        instance_counts, drawcount);
  CheckGLError();
}
// Releases a tracked transfer buffer and unregisters it from the tracker.
// If the service may still be consuming the buffer (its last-usage token
// has not passed), the underlying memory is freed lazily once that token
// retires; otherwise it is freed immediately.
void GLES2Implementation::RemoveTransferBuffer(BufferTracker::Buffer* buffer) {
  const int32_t last_token = buffer->last_usage_token();
  if (last_token && !helper_->HasTokenPassed(last_token)) {
    // Still potentially in flight: defer the free until the token passes.
    buffer_tracker_->FreePendingToken(buffer, last_token);
  } else {
    // No outstanding usage (no token, or the token already passed).
    buffer_tracker_->Free(buffer);
  }
  buffer_tracker_->RemoveBuffer(buffer->id());
}
// Returns true iff |target| is one of the CHROMIUM pixel transfer buffer
// bind points, writing the currently bound buffer id (possibly 0) to
// |buffer_id|. When the target matches but nothing is bound, raises
// GL_INVALID_OPERATION on behalf of |function_name| and still returns true.
bool GLES2Implementation::GetBoundPixelTransferBuffer(GLenum target,
                                                      const char* function_name,
                                                      GLuint* buffer_id) {
  *buffer_id = 0;
  if (target == GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM) {
    *buffer_id = bound_pixel_pack_transfer_buffer_id_;
  } else if (target == GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM) {
    *buffer_id = bound_pixel_unpack_transfer_buffer_id_;
  } else {
    // Not a pixel transfer buffer target; caller handles |target| itself.
    return false;
  }
  if (*buffer_id == 0)
    SetGLError(GL_INVALID_OPERATION, function_name, "no buffer bound");
  return true;
}
// Looks up the tracked pixel transfer buffer |buffer_id| and checks that the
// range [offset, offset + size) is usable: the buffer must exist, must not
// be mapped, and the range must fit in the buffer without 32-bit overflow.
// Returns the buffer on success; otherwise raises a GL error on behalf of
// |function_name| and returns nullptr.
// Fixed: error messages previously read "to large" instead of "too large".
BufferTracker::Buffer* GLES2Implementation::GetBoundPixelTransferBufferIfValid(
    GLuint buffer_id,
    const char* function_name,
    GLuint offset,
    GLsizei size) {
  DCHECK(buffer_id);
  BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
  if (!buffer) {
    SetGLError(GL_INVALID_OPERATION, function_name, "invalid buffer");
    return nullptr;
  }
  if (buffer->mapped()) {
    SetGLError(GL_INVALID_OPERATION, function_name, "buffer mapped");
    return nullptr;
  }
  // The shared-memory offset the caller will compute must not overflow.
  base::CheckedNumeric<uint32_t> buffer_offset = buffer->shm_offset();
  buffer_offset += offset;
  if (!buffer_offset.IsValid()) {
    SetGLError(GL_INVALID_VALUE, function_name, "offset too large");
    return nullptr;
  }
  // The requested range must lie entirely within the buffer.
  base::CheckedNumeric<uint32_t> required_size = offset;
  required_size += size;
  if (!required_size.IsValid() ||
      buffer->size() < required_size.ValueOrDefault(0)) {
    SetGLError(GL_INVALID_VALUE, function_name, "unpack size too large");
    return nullptr;
  }
  return buffer;
}
// Notifies the readback shadow tracker that buffer |buffer_id| was written,
// presumably so that any shadowed copy of its contents is treated as stale.
void GLES2Implementation::InvalidateReadbackBufferShadowDataCHROMIUM(
    GLuint buffer_id) {
  readback_buffer_shadow_tracker_->OnBufferWrite(buffer_id);
}
// Entry point for glCompressedTexImage2D. The compressed pixel data is
// sourced, in priority order, from: a bound CHROMIUM pixel unpack transfer
// buffer (|data| is interpreted as an offset into it), a bound
// GL_PIXEL_UNPACK_BUFFER (|data| is passed through as an offset), client
// memory sent via a bucket, or nothing (null |data| allocates storage only).
void GLES2Implementation::CompressedTexImage2D(GLenum target,
                                               GLint level,
                                               GLenum internalformat,
                                               GLsizei width,
                                               GLsizei height,
                                               GLint border,
                                               GLsizei image_size,
                                               const void* data) {
  GPU_CLIENT_SINGLE_THREAD_CHECK();
  GPU_CLIENT_LOG(
      "[" << GetLogPrefix() << "] glCompressedTexImage2D("
          << GLES2Util::GetStringTextureTarget(target) << ", " << level << ", "
          << GLES2Util::GetStringCompressedTextureFormat(internalformat) << ", "
          << width << ", " << height << ", " << border << ", " << image_size
          << ", " << static_cast<const void*>(data) << ")");
  if (width < 0 || height < 0 || level < 0) {
    SetGLError(GL_INVALID_VALUE, "glCompressedTexImage2D", "dimension < 0");
    return;
  }
  // |border| must be 0, so it is validated here and not forwarded to the
  // helper commands below.
  if (border != 0) {
    SetGLError(GL_INVALID_VALUE, "glCompressedTexImage2D", "border != 0");
    return;
  }
  // If there's a pixel unpack buffer bound use it when issuing
  // CompressedTexImage2D.
  if (bound_pixel_unpack_transfer_buffer_id_) {
    // |data| is an offset into the transfer buffer, not a client pointer.
    GLuint offset = ToGLuint(data);
    BufferTracker::Buffer* buffer = GetBoundPixelTransferBufferIfValid(
        bound_pixel_unpack_transfer_buffer_id_, "glCompressedTexImage2D",
        offset, image_size);
    if (buffer && buffer->shm_id() != -1) {
      helper_->CompressedTexImage2D(target, level, internalformat, width,
                                    height, image_size, buffer->shm_id(),
                                    buffer->shm_offset() + offset);
      // Record the token so RemoveTransferBuffer can defer freeing the
      // buffer until the service has consumed this command.
      buffer->set_last_usage_token(helper_->InsertToken());
    }
    // On validation failure the GL error was already set; return either way.
    return;
  }
  if (bound_pixel_unpack_buffer_) {
    // GL_PIXEL_UNPACK_BUFFER bound: forward |data| as an offset (shm id 0).
    helper_->CompressedTexImage2D(target, level, internalformat, width, height,
                                  image_size, 0, ToGLuint(data));
  } else if (data) {
    // Client memory: ship the bytes through a bucket.
    SetBucketContents(kResultBucketId, data, image_size);
    helper_->CompressedTexImage2DBucket(target, level, internalformat, width,
                                        height, kResultBucketId);
    // Free the bucket. This is not required but it does free up the memory.
    // and we don't have to wait for the result so from the client's perspective
    // it's cheap.
    helper_->SetBucketSize(kResultBucketId, 0);
  } else {
    // No data: allocate texture storage without uploading anything.
    helper_->CompressedTexImage2D(target, level, internalformat, width, height,
                                  image_size, 0, 0);
  }
  CheckGLError();
}
void GLES2Implementation::CompressedTexSubImage2D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLsizei image_size,
const void* data) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG(
"[" << GetLogPrefix() << "] glCompressedTexSubImage2D("
<< GLES2Util::GetStringTextureTarget(target) << ", " << level << ", "
<< xoffset << ", " << yoffset << ", " << width << ", " << height
<< ", " << GLES2Util::GetStringCompressedTextureFormat(format) << ", "
<< image_size << ", " << static_cast<const void*>(data) << ")");
if (width < 0 || height < 0 || level < 0) {
SetGLError(GL_INVALID_VALUE, "glCompressedTexSubImage2D", "dimension < 0");
return;
}
// If there's a pixel unpack buffer bound use it when issuing
// CompressedTexSubImage2D.
if (bound_pixel_unpack_transfer_buffer_id_) {
GLuint offset = ToGLuint(data);
BufferTracker::Buffer* buffer = GetBoundPixelTransferBufferIfValid(
bound_pixel_unpack_transfer_buffer_id_, "glCompressedTexSubImage2D",
offset, image_size);
if (buffer && buffer->shm_id() != -1) {
helper_->CompressedTexSubImage2D(
target, level, xoffset, yoffset, width, height, format, image_size,
buffer->shm_id(), buffer->shm_offset() + offset);
buffer->set_last_usage_token(helper_->InsertToken());
CheckGLError();
}
return;
}
if (bound_pixel_unpack_buffer_) {
helper_->CompressedTexSubImage2D(target, level, xoffset, yoffset, width,
height, format, image_size, 0,
ToGLuint(data));
} else if (data) {
SetBucketContents(kResultBucketId, data, image_size);
helper_->CompressedTexSubImage2DBucket(target, level, xoffset, yoffset,
width, height, format,
kResultBucketId);
// Free the bucket. This is not required but it does free up the memory.
// and we don't have to wait for the result so from the client's perspective
// it's cheap.
helper_->SetBucketSize(kResultBucketId, 0);
} else {