Clone of chromium aad1ce808763f59c7a3753e08f1500a104ecc6fd refs/remotes/origin/HEAD
diff --git a/gpu/command_buffer/client/BUILD.gn b/gpu/command_buffer/client/BUILD.gn
new file mode 100644
index 0000000..afd03a4
--- /dev/null
+++ b/gpu/command_buffer/client/BUILD.gn
@@ -0,0 +1,193 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_set("client") {
+ sources = [
+ "cmd_buffer_helper.cc",
+ "cmd_buffer_helper.h",
+ "fenced_allocator.cc",
+ "fenced_allocator.h",
+ "gpu_control.h",
+ "mapped_memory.cc",
+ "mapped_memory.h",
+ "ring_buffer.cc",
+ "ring_buffer.h",
+ "transfer_buffer.cc",
+ "transfer_buffer.h",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ deps = [
+ "//gpu/command_buffer/common",
+ ]
+}
+
+group("gles2_cmd_helper") {
+ if (is_component_build) {
+ deps = [ "//gpu" ]
+ } else {
+ deps = [ ":gles2_cmd_helper_sources" ]
+ }
+}
+
+source_set("gles2_cmd_helper_sources") {
+ visibility = [ ":gles2_cmd_helper", "//gpu" ]
+ sources = [
+ "gles2_cmd_helper.cc",
+ "gles2_cmd_helper.h",
+ "gles2_cmd_helper_autogen.h",
+ ]
+
+ defines = [ "GPU_IMPLEMENTATION" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [ ":client" ]
+}
+
+gles2_c_lib_source_files = [
+ "gles2_c_lib.cc",
+ "gles2_c_lib_autogen.h",
+ "gles2_c_lib_export.h",
+ "gles2_lib.h",
+ "gles2_lib.cc",
+]
+
+gles2_implementation_source_files = [
+ "buffer_tracker.cc",
+ "buffer_tracker.h",
+ "client_context_state.h",
+ "client_context_state.cc",
+ "client_context_state_autogen.h",
+ "client_context_state_impl_autogen.h",
+ "gles2_impl_export.h",
+ "gles2_implementation_autogen.h",
+ "gles2_implementation.cc",
+ "gles2_implementation.h",
+ "gles2_implementation_impl_autogen.h",
+ "gles2_trace_implementation_autogen.h",
+ "gles2_trace_implementation.cc",
+ "gles2_trace_implementation.h",
+ "gles2_trace_implementation_impl_autogen.h",
+ "gpu_memory_buffer_factory.h",
+ "gpu_memory_buffer_tracker.cc",
+ "gpu_memory_buffer_tracker.h",
+ "gpu_switches.cc",
+ "gpu_switches.h",
+ "program_info_manager.cc",
+ "program_info_manager.h",
+ "query_tracker.cc",
+ "query_tracker.h",
+ "share_group.cc",
+ "share_group.h",
+ "vertex_array_object_manager.cc",
+ "vertex_array_object_manager.h",
+]
+
+# Provides GLES2 interface, but does not cause any implementation to be linked
+# in. Useful when a target uses the interface, but permits its users to choose
+# an implementation.
+source_set("gles2_interface") {
+ sources = [ "gles2_interface.h" ]
+ public_configs = [ "//third_party/khronos:khronos_headers" ]
+ deps = [
+ "//base",
+ ]
+}
+
+# Library emulates GLES2 using command_buffers.
+component("gles2_implementation") {
+ sources = gles2_implementation_source_files
+
+ defines = [ "GLES2_IMPL_IMPLEMENTATION" ]
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [
+ ":gles2_cmd_helper",
+ ":gles2_interface",
+ "//base",
+ "//gpu/command_buffer/common",
+ "//ui/gfx/geometry",
+ ]
+}
+
+# Library emulates GLES2 using command_buffers, with client side array support.
+component("gles2_implementation_client_side_arrays") {
+ sources = gles2_implementation_source_files
+
+ defines = [
+ "GLES2_IMPL_IMPLEMENTATION",
+ "GLES2_SUPPORT_CLIENT_SIDE_ARRAYS=1",
+ ]
+ all_dependent_configs = [ "//third_party/khronos:khronos_headers" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [
+ ":gles2_cmd_helper",
+ ":gles2_interface",
+ "//base",
+ "//gpu/command_buffer/common",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ ]
+}
+
+component("gl_in_process_context") {
+ sources = [
+ "gl_in_process_context.h",
+ "gl_in_process_context.cc",
+ "gl_in_process_context_export.h",
+ ]
+
+ defines = [ "GL_IN_PROCESS_CONTEXT_IMPLEMENTATION" ]
+
+ deps = [
+ ":gles2_implementation",
+ "//gpu",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//ui/gfx/geometry",
+ "//ui/gl",
+ ]
+}
+
+component("gles2_c_lib") {
+ sources = gles2_c_lib_source_files
+ defines = [ "GLES2_C_LIB_IMPLEMENTATION" ]
+
+ if (is_win) {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ cflags = [ "/wd4267" ] # size_t to int truncation.
+ }
+
+ deps = [
+ ":client",
+ ":gles2_interface",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ "//gpu/command_buffer/common",
+ ]
+}
+
diff --git a/gpu/command_buffer/client/buffer_tracker.cc b/gpu/command_buffer/client/buffer_tracker.cc
new file mode 100644
index 0000000..5887e52
--- /dev/null
+++ b/gpu/command_buffer/client/buffer_tracker.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/buffer_tracker.h"
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+
+namespace gpu {
+namespace gles2 {
+
+BufferTracker::BufferTracker(MappedMemoryManager* manager)
+ : mapped_memory_(manager) {
+}
+
+BufferTracker::~BufferTracker() {
+ while (!buffers_.empty()) {
+ RemoveBuffer(buffers_.begin()->first);
+ }
+}
+
+BufferTracker::Buffer* BufferTracker::CreateBuffer(
+ GLuint id, GLsizeiptr size) {
+ DCHECK_NE(0u, id);
+ DCHECK_LE(0, size);
+ int32 shm_id = -1;
+ uint32 shm_offset = 0;
+ void* address = NULL;
+ if (size)
+ address = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+
+ Buffer* buffer = new Buffer(id, size, shm_id, shm_offset, address);
+ std::pair<BufferMap::iterator, bool> result =
+ buffers_.insert(std::make_pair(id, buffer));
+ DCHECK(result.second);
+ return buffer;
+}
+
+BufferTracker::Buffer* BufferTracker::GetBuffer(GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ return it != buffers_.end() ? it->second : NULL;
+}
+
+void BufferTracker::RemoveBuffer(GLuint client_id) {
+ BufferMap::iterator it = buffers_.find(client_id);
+ if (it != buffers_.end()) {
+ Buffer* buffer = it->second;
+ buffers_.erase(it);
+ if (buffer->address_)
+ mapped_memory_->Free(buffer->address_);
+ delete buffer;
+ }
+}
+
+void BufferTracker::FreePendingToken(Buffer* buffer, int32 token) {
+ if (buffer->address_)
+ mapped_memory_->FreePendingToken(buffer->address_, token);
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+void BufferTracker::Unmanage(Buffer* buffer) {
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+void BufferTracker::Free(Buffer* buffer) {
+ if (buffer->address_)
+ mapped_memory_->Free(buffer->address_);
+
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/buffer_tracker.h b/gpu/command_buffer/client/buffer_tracker.h
new file mode 100644
index 0000000..33bd94b
--- /dev/null
+++ b/gpu/command_buffer/client/buffer_tracker.h
@@ -0,0 +1,125 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_BUFFER_TRACKER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_BUFFER_TRACKER_H_
+
+#include <GLES2/gl2.h>
+
+#include <queue>
+#include "base/containers/hash_tables.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+class MappedMemoryManager;
+
+namespace gles2 {
+
+// Tracks buffer objects for the client side of the command buffer.
+class GLES2_IMPL_EXPORT BufferTracker {
+ public:
+ class GLES2_IMPL_EXPORT Buffer {
+ public:
+ Buffer(GLuint id,
+ unsigned int size,
+ int32 shm_id,
+ uint32 shm_offset,
+ void* address)
+ : id_(id),
+ size_(size),
+ shm_id_(shm_id),
+ shm_offset_(shm_offset),
+ address_(address),
+ mapped_(false),
+ last_usage_token_(0),
+ last_async_upload_token_(0) {
+ }
+
+ GLuint id() const {
+ return id_;
+ }
+
+ unsigned int size() const {
+ return size_;
+ }
+
+ int32 shm_id() const {
+ return shm_id_;
+ }
+
+ uint32 shm_offset() const {
+ return shm_offset_;
+ }
+
+ void* address() const {
+ return address_;
+ }
+
+ void set_mapped(bool mapped) {
+ mapped_ = mapped;
+ }
+
+ bool mapped() const {
+ return mapped_;
+ }
+
+ void set_last_usage_token(int token) {
+ last_usage_token_ = token;
+ }
+
+ int last_usage_token() const {
+ return last_usage_token_;
+ }
+
+ void set_last_async_upload_token(uint32 async_token) {
+ last_async_upload_token_ = async_token;
+ }
+
+ GLuint last_async_upload_token() const {
+ return last_async_upload_token_;
+ }
+
+ private:
+ friend class BufferTracker;
+ friend class BufferTrackerTest;
+
+ GLuint id_;
+ unsigned int size_;
+ int32 shm_id_;
+ uint32 shm_offset_;
+ void* address_;
+ bool mapped_;
+ int32 last_usage_token_;
+ GLuint last_async_upload_token_;
+ };
+
+ explicit BufferTracker(MappedMemoryManager* manager);
+ ~BufferTracker();
+
+ Buffer* CreateBuffer(GLuint id, GLsizeiptr size);
+ Buffer* GetBuffer(GLuint id);
+ void RemoveBuffer(GLuint id);
+
+ // Frees the block of memory associated with buffer, pending the passage
+ // of a token.
+ void FreePendingToken(Buffer* buffer, int32 token);
+ void Unmanage(Buffer* buffer);
+ void Free(Buffer* buffer);
+
+ private:
+ typedef base::hash_map<GLuint, Buffer*> BufferMap;
+
+ MappedMemoryManager* mapped_memory_;
+ BufferMap buffers_;
+
+ DISALLOW_COPY_AND_ASSIGN(BufferTracker);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_BUFFER_TRACKER_H_
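
For orientation, a minimal usage sketch of the tracker defined above (illustrative only, not part of the patch): the CommandBufferHelper and MappedMemoryManager are assumed to be set up as in the unit test that follows, and kClientId plus the command that consumes the shared memory are hypothetical.

#include <string.h>

#include "gpu/command_buffer/client/buffer_tracker.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/mapped_memory.h"

void UploadThenRecycle(gpu::CommandBufferHelper* helper,
                       gpu::MappedMemoryManager* mapped_memory,
                       const void* src, GLsizeiptr size) {
  const GLuint kClientId = 123;  // hypothetical client buffer id
  gpu::gles2::BufferTracker tracker(mapped_memory);
  gpu::gles2::BufferTracker::Buffer* buffer =
      tracker.CreateBuffer(kClientId, size);
  if (buffer->address())  // NULL when size is 0 or the context is lost
    memcpy(buffer->address(), src, size);
  // ... issue a command that reads buffer->shm_id() / buffer->shm_offset() ...
  int32 token = helper->InsertToken();      // marks this point in the stream
  tracker.FreePendingToken(buffer, token);  // shm reclaimed once token passes
  tracker.RemoveBuffer(kClientId);
}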
diff --git a/gpu/command_buffer/client/buffer_tracker_unittest.cc b/gpu/command_buffer/client/buffer_tracker_unittest.cc
new file mode 100644
index 0000000..f6174c0
--- /dev/null
+++ b/gpu/command_buffer/client/buffer_tracker_unittest.cc
@@ -0,0 +1,153 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the BufferTracker.
+
+#include "gpu/command_buffer/client/buffer_tracker.h"
+
+#include <GLES2/gl2ext.h>
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class MockClientCommandBufferImpl : public MockClientCommandBuffer {
+ public:
+ MockClientCommandBufferImpl()
+ : MockClientCommandBuffer(),
+ context_lost_(false) {}
+ virtual ~MockClientCommandBufferImpl() {}
+
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE {
+ if (context_lost_) {
+ *id = -1;
+ return NULL;
+ }
+ return MockClientCommandBuffer::CreateTransferBuffer(size, id);
+ }
+
+ void set_context_lost(bool context_lost) {
+ context_lost_ = context_lost;
+ }
+
+ private:
+ bool context_lost_;
+};
+
+namespace {
+void EmptyPoll() {
+}
+}  // namespace
+
+class BufferTrackerTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+
+ virtual void SetUp() {
+ command_buffer_.reset(new MockClientCommandBufferImpl());
+ helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ buffer_tracker_.reset(new BufferTracker(mapped_memory_.get()));
+ }
+
+ virtual void TearDown() {
+ buffer_tracker_.reset();
+ mapped_memory_.reset();
+ helper_.reset();
+ command_buffer_.reset();
+ }
+
+ scoped_ptr<MockClientCommandBufferImpl> command_buffer_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+ scoped_ptr<BufferTracker> buffer_tracker_;
+};
+
+TEST_F(BufferTrackerTest, Basic) {
+ const GLuint kId1 = 123;
+ const GLuint kId2 = 124;
+ const GLsizeiptr size = 64;
+
+ // Check we can create a Buffer.
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId1, size);
+ ASSERT_TRUE(buffer != NULL);
+ // Check we can get the same Buffer.
+ EXPECT_EQ(buffer, buffer_tracker_->GetBuffer(kId1));
+ // Check mapped memory address.
+ EXPECT_TRUE(buffer->address() != NULL);
+ // Check shared memory was allocated.
+ EXPECT_EQ(1lu, mapped_memory_->num_chunks());
+ // Check we get nothing for a non-existent buffer.
+ EXPECT_TRUE(buffer_tracker_->GetBuffer(kId2) == NULL);
+ // Check we can delete the buffer.
+ buffer_tracker_->RemoveBuffer(kId1);
+ // Check shared memory was freed.
+ mapped_memory_->FreeUnused();
+ EXPECT_EQ(0lu, mapped_memory_->num_chunks());
+ // Check we get nothing for a non-existent buffer.
+ EXPECT_TRUE(buffer_tracker_->GetBuffer(kId1) == NULL);
+}
+
+TEST_F(BufferTrackerTest, ZeroSize) {
+ const GLuint kId = 123;
+
+ // Check we can create a Buffer with zero size.
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, 0);
+ ASSERT_TRUE(buffer != NULL);
+ // Check mapped memory address.
+ EXPECT_TRUE(buffer->address() == NULL);
+ // Check no shared memory was allocated.
+ EXPECT_EQ(0lu, mapped_memory_->num_chunks());
+ // Check we can delete the buffer.
+ buffer_tracker_->RemoveBuffer(kId);
+}
+
+TEST_F(BufferTrackerTest, LostContext) {
+ const GLuint kId = 123;
+ const GLsizeiptr size = 64;
+
+ command_buffer_->set_context_lost(true);
+ // Check we can create a Buffer after losing the context.
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, size);
+ ASSERT_TRUE(buffer != NULL);
+ // Check the buffer size was recorded.
+ EXPECT_EQ(64u, buffer->size());
+ // Check mapped memory address.
+ EXPECT_TRUE(buffer->address() == NULL);
+ // Check no shared memory was allocated.
+ EXPECT_EQ(0lu, mapped_memory_->num_chunks());
+ // Check we can delete the buffer.
+ buffer_tracker_->RemoveBuffer(kId);
+}
+
+TEST_F(BufferTrackerTest, Unmanage) {
+ const GLuint kId = 123;
+ const GLsizeiptr size = 64;
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, size);
+ ASSERT_TRUE(buffer != NULL);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(size));
+
+ void* mem = buffer->address();
+ buffer_tracker_->Unmanage(buffer);
+ buffer_tracker_->RemoveBuffer(kId);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(size));
+
+ mapped_memory_->Free(mem);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(0));
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/client_context_state.cc b/gpu/command_buffer/client/client_context_state.cc
new file mode 100644
index 0000000..9f4fbad
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/client_context_state.h"
+
+#include "base/logging.h"
+
+namespace gpu {
+namespace gles2 {
+
+ClientContextState::ClientContextState() {
+}
+
+ClientContextState::~ClientContextState() {
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/client_context_state_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/client_context_state.h b/gpu/command_buffer/client/client_context_state.h
new file mode 100644
index 0000000..f5a93a6
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the ClientContextState class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_H_
+
+#include <GLES2/gl2.h>
+#include <vector>
+#include "gles2_impl_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+struct GLES2_IMPL_EXPORT ClientContextState {
+ ClientContextState();
+ ~ClientContextState();
+
+ // Returns true if the state was cached, in which case 'enabled' will be set
+ // to the current state.
+ bool GetEnabled(GLenum cap, bool* enabled) const;
+
+ // Sets the state of a capability.
+ // Returns true if the capability is one that is cached.
+ // 'changed' will be true if the state was different from 'enabled'.
+ bool SetCapabilityState(GLenum cap, bool enabled, bool* changed);
+
+ #include "gpu/command_buffer/client/client_context_state_autogen.h"
+
+ EnableFlags enable_flags;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_H_
+
diff --git a/gpu/command_buffer/client/client_context_state_autogen.h b/gpu/command_buffer/client/client_context_state_autogen.h
new file mode 100644
index 0000000..72a4f72
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state_autogen.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by client_context_state.h
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_AUTOGEN_H_
+
+struct EnableFlags {
+ EnableFlags();
+ bool blend;
+ bool cull_face;
+ bool depth_test;
+ bool dither;
+ bool polygon_offset_fill;
+ bool sample_alpha_to_coverage;
+ bool sample_coverage;
+ bool scissor_test;
+ bool stencil_test;
+};
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/client_context_state_impl_autogen.h b/gpu/command_buffer/client/client_context_state_impl_autogen.h
new file mode 100644
index 0000000..cff14f7
--- /dev/null
+++ b/gpu/command_buffer/client/client_context_state_impl_autogen.h
@@ -0,0 +1,123 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// It is included by client_context_state.cc
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_IMPL_AUTOGEN_H_
+
+ClientContextState::EnableFlags::EnableFlags()
+ : blend(false),
+ cull_face(false),
+ depth_test(false),
+ dither(true),
+ polygon_offset_fill(false),
+ sample_alpha_to_coverage(false),
+ sample_coverage(false),
+ scissor_test(false),
+ stencil_test(false) {
+}
+
+bool ClientContextState::SetCapabilityState(GLenum cap,
+ bool enabled,
+ bool* changed) {
+ *changed = false;
+ switch (cap) {
+ case GL_BLEND:
+ if (enable_flags.blend != enabled) {
+ *changed = true;
+ enable_flags.blend = enabled;
+ }
+ return true;
+ case GL_CULL_FACE:
+ if (enable_flags.cull_face != enabled) {
+ *changed = true;
+ enable_flags.cull_face = enabled;
+ }
+ return true;
+ case GL_DEPTH_TEST:
+ if (enable_flags.depth_test != enabled) {
+ *changed = true;
+ enable_flags.depth_test = enabled;
+ }
+ return true;
+ case GL_DITHER:
+ if (enable_flags.dither != enabled) {
+ *changed = true;
+ enable_flags.dither = enabled;
+ }
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ if (enable_flags.polygon_offset_fill != enabled) {
+ *changed = true;
+ enable_flags.polygon_offset_fill = enabled;
+ }
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ if (enable_flags.sample_alpha_to_coverage != enabled) {
+ *changed = true;
+ enable_flags.sample_alpha_to_coverage = enabled;
+ }
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ if (enable_flags.sample_coverage != enabled) {
+ *changed = true;
+ enable_flags.sample_coverage = enabled;
+ }
+ return true;
+ case GL_SCISSOR_TEST:
+ if (enable_flags.scissor_test != enabled) {
+ *changed = true;
+ enable_flags.scissor_test = enabled;
+ }
+ return true;
+ case GL_STENCIL_TEST:
+ if (enable_flags.stencil_test != enabled) {
+ *changed = true;
+ enable_flags.stencil_test = enabled;
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+bool ClientContextState::GetEnabled(GLenum cap, bool* enabled) const {
+ switch (cap) {
+ case GL_BLEND:
+ *enabled = enable_flags.blend;
+ return true;
+ case GL_CULL_FACE:
+ *enabled = enable_flags.cull_face;
+ return true;
+ case GL_DEPTH_TEST:
+ *enabled = enable_flags.depth_test;
+ return true;
+ case GL_DITHER:
+ *enabled = enable_flags.dither;
+ return true;
+ case GL_POLYGON_OFFSET_FILL:
+ *enabled = enable_flags.polygon_offset_fill;
+ return true;
+ case GL_SAMPLE_ALPHA_TO_COVERAGE:
+ *enabled = enable_flags.sample_alpha_to_coverage;
+ return true;
+ case GL_SAMPLE_COVERAGE:
+ *enabled = enable_flags.sample_coverage;
+ return true;
+ case GL_SCISSOR_TEST:
+ *enabled = enable_flags.scissor_test;
+ return true;
+ case GL_STENCIL_TEST:
+ *enabled = enable_flags.stencil_test;
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_CONTEXT_STATE_IMPL_AUTOGEN_H_
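
ClientContextState caches the enable flags above so the client can avoid sending redundant state changes. A small sketch of the intended pattern (illustrative only; SendEnableToService is a hypothetical stand-in for whatever actually issues the Enable/Disable command):

#include "gpu/command_buffer/client/client_context_state.h"

// Hypothetical transport for the actual Enable/Disable command, declared here
// only so the sketch is self-contained.
void SendEnableToService(GLenum cap, bool enabled);

void SetCapability(gpu::gles2::ClientContextState* state,
                   GLenum cap, bool enabled) {
  bool changed = false;
  if (!state->SetCapabilityState(cap, enabled, &changed) || changed) {
    // Either the capability is not one that is cached, or its cached value
    // really changed; only then is a command worth sending to the service.
    SendEnableToService(cap, enabled);
  }
}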
diff --git a/gpu/command_buffer/client/client_test_helper.cc b/gpu/command_buffer/client/client_test_helper.cc
new file mode 100644
index 0000000..8c633ef
--- /dev/null
+++ b/gpu/command_buffer/client/client_test_helper.cc
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper classes for implementing gpu client side unit tests.
+
+#include "gpu/command_buffer/client/client_test_helper.h"
+
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace gpu {
+
+MockCommandBufferBase::MockCommandBufferBase() {
+}
+
+MockCommandBufferBase::~MockCommandBufferBase() {
+}
+
+bool MockCommandBufferBase::Initialize() {
+ return true;
+}
+
+CommandBuffer::State MockCommandBufferBase::GetLastState() {
+ return state_;
+}
+
+int32 MockCommandBufferBase::GetLastToken() {
+ return state_.token;
+}
+
+void MockCommandBufferBase::SetGetOffset(int32 get_offset) {
+ state_.get_offset = get_offset;
+}
+
+void MockCommandBufferBase::WaitForTokenInRange(int32 start, int32 end) {}
+
+void MockCommandBufferBase::WaitForGetOffsetInRange(int32 start, int32 end) {
+ state_.get_offset = state_.put_offset;
+ OnFlush();
+}
+
+void MockCommandBufferBase::SetGetBuffer(int transfer_buffer_id) {
+ ring_buffer_buffer_ = GetTransferBuffer(transfer_buffer_id);
+ ring_buffer_ =
+ static_cast<CommandBufferEntry*>(ring_buffer_buffer_->memory());
+ state_.num_entries = ring_buffer_buffer_->size() / sizeof(ring_buffer_[0]);
+ state_.token = 10000; // All token checks in the tests should pass.
+}
+
+// Gets the id of the next transfer buffer that will be returned
+// by CreateTransferBuffer. This is useful for testing expected ids.
+int32 MockCommandBufferBase::GetNextFreeTransferBufferId() {
+ for (size_t ii = 0; ii < arraysize(transfer_buffer_buffers_); ++ii) {
+ if (!transfer_buffer_buffers_[ii].get()) {
+ return kTransferBufferBaseId + ii;
+ }
+ }
+ return -1;
+}
+
+scoped_refptr<gpu::Buffer> MockCommandBufferBase::CreateTransferBuffer(
+ size_t size,
+ int32* id) {
+ *id = GetNextFreeTransferBufferId();
+ if (*id >= 0) {
+ int32 ndx = *id - kTransferBufferBaseId;
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(size);
+ transfer_buffer_buffers_[ndx] =
+ MakeBufferFromSharedMemory(shared_memory.Pass(), size);
+ }
+ return GetTransferBuffer(*id);
+}
+
+void MockCommandBufferBase::DestroyTransferBufferHelper(int32 id) {
+ DCHECK_GE(id, kTransferBufferBaseId);
+ DCHECK_LT(id, kTransferBufferBaseId + kMaxTransferBuffers);
+ id -= kTransferBufferBaseId;
+ transfer_buffer_buffers_[id] = NULL;
+}
+
+scoped_refptr<Buffer> MockCommandBufferBase::GetTransferBuffer(int32 id) {
+ DCHECK_GE(id, kTransferBufferBaseId);
+ DCHECK_LT(id, kTransferBufferBaseId + kMaxTransferBuffers);
+ return transfer_buffer_buffers_[id - kTransferBufferBaseId];
+}
+
+void MockCommandBufferBase::FlushHelper(int32 put_offset) {
+ state_.put_offset = put_offset;
+}
+
+void MockCommandBufferBase::SetToken(int32 token) {
+ NOTREACHED();
+ state_.token = token;
+}
+
+void MockCommandBufferBase::SetParseError(error::Error error) {
+ NOTREACHED();
+ state_.error = error;
+}
+
+void MockCommandBufferBase::SetContextLostReason(
+ error::ContextLostReason reason) {
+ NOTREACHED();
+ state_.context_lost_reason = reason;
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const int32 MockCommandBufferBase::kTransferBufferBaseId;
+const int32 MockCommandBufferBase::kMaxTransferBuffers;
+#endif
+
+MockClientCommandBuffer::MockClientCommandBuffer() {
+ DelegateToFake();
+}
+
+MockClientCommandBuffer::~MockClientCommandBuffer() {
+}
+
+void MockClientCommandBuffer::Flush(int32 put_offset) {
+ FlushHelper(put_offset);
+}
+
+void MockClientCommandBuffer::DelegateToFake() {
+ ON_CALL(*this, DestroyTransferBuffer(_))
+ .WillByDefault(Invoke(
+ this, &MockCommandBufferBase::DestroyTransferBufferHelper));
+}
+
+MockClientCommandBufferMockFlush::MockClientCommandBufferMockFlush() {
+ DelegateToFake();
+}
+
+MockClientCommandBufferMockFlush::~MockClientCommandBufferMockFlush() {
+}
+
+void MockClientCommandBufferMockFlush::DelegateToFake() {
+ MockClientCommandBuffer::DelegateToFake();
+ ON_CALL(*this, Flush(_))
+ .WillByDefault(Invoke(
+ this, &MockCommandBufferBase::FlushHelper));
+}
+
+MockClientGpuControl::MockClientGpuControl() {
+}
+
+MockClientGpuControl::~MockClientGpuControl() {
+}
+
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/client_test_helper.h b/gpu/command_buffer/client/client_test_helper.h
new file mode 100644
index 0000000..a0363a9
--- /dev/null
+++ b/gpu/command_buffer/client/client_test_helper.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper classes for implementing gpu client side unit tests.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CLIENT_TEST_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CLIENT_TEST_HELPER_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/gpu_memory_allocation.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+class MockCommandBufferBase : public CommandBufferServiceBase {
+ public:
+ static const int32 kTransferBufferBaseId = 0x123;
+ static const int32 kMaxTransferBuffers = 6;
+
+ MockCommandBufferBase();
+ virtual ~MockCommandBufferBase();
+
+ virtual bool Initialize() OVERRIDE;
+ virtual State GetLastState() OVERRIDE;
+ virtual int32 GetLastToken() OVERRIDE;
+ virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
+ virtual void SetGetBuffer(int transfer_buffer_id) OVERRIDE;
+ virtual void SetGetOffset(int32 get_offset) OVERRIDE;
+ virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
+ int32* id) OVERRIDE;
+ virtual scoped_refptr<gpu::Buffer> GetTransferBuffer(int32 id) OVERRIDE;
+ virtual void SetToken(int32 token) OVERRIDE;
+ virtual void SetParseError(error::Error error) OVERRIDE;
+ virtual void SetContextLostReason(error::ContextLostReason reason) OVERRIDE;
+
+ // Gets the id of the next transfer buffer that will be returned
+ // by CreateTransferBuffer. This is useful for testing expected ids.
+ int32 GetNextFreeTransferBufferId();
+
+ void FlushHelper(int32 put_offset);
+ void DestroyTransferBufferHelper(int32 id);
+
+ virtual void OnFlush() = 0;
+
+ private:
+ scoped_refptr<Buffer> transfer_buffer_buffers_[kMaxTransferBuffers];
+ CommandBufferEntry* ring_buffer_;
+ scoped_refptr<Buffer> ring_buffer_buffer_;
+ State state_;
+};
+
+class MockClientCommandBuffer : public MockCommandBufferBase {
+ public:
+ MockClientCommandBuffer();
+ virtual ~MockClientCommandBuffer();
+
+ // This is so we can use all the gmock functions when Flush is called.
+ MOCK_METHOD0(OnFlush, void());
+ MOCK_METHOD1(DestroyTransferBuffer, void(int32 id));
+
+ virtual void Flush(int32 put_offset) OVERRIDE;
+
+ void DelegateToFake();
+};
+
+class MockClientCommandBufferMockFlush : public MockClientCommandBuffer {
+ public:
+ MockClientCommandBufferMockFlush();
+ virtual ~MockClientCommandBufferMockFlush();
+
+ MOCK_METHOD1(Flush, void(int32 put_offset));
+
+ void DelegateToFake();
+};
+
+class MockClientGpuControl : public GpuControl {
+ public:
+ MockClientGpuControl();
+ virtual ~MockClientGpuControl();
+
+ MOCK_METHOD0(GetCapabilities, Capabilities());
+ MOCK_METHOD5(CreateGpuMemoryBuffer,
+ gfx::GpuMemoryBuffer*(size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32* id));
+ MOCK_METHOD1(DestroyGpuMemoryBuffer, void(int32 id));
+ MOCK_METHOD0(InsertSyncPoint, uint32());
+ MOCK_METHOD0(InsertFutureSyncPoint, uint32());
+ MOCK_METHOD1(RetireSyncPoint, void(uint32 id));
+ MOCK_METHOD2(SignalSyncPoint, void(uint32 id, const base::Closure& callback));
+ MOCK_METHOD2(SignalQuery, void(uint32 query, const base::Closure& callback));
+ MOCK_METHOD1(SetSurfaceVisible, void(bool visible));
+ MOCK_METHOD1(CreateStreamTexture, uint32(uint32));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockClientGpuControl);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CLIENT_TEST_HELPER_H_
+
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.cc b/gpu/command_buffer/client/cmd_buffer_helper.cc
new file mode 100644
index 0000000..a99201e
--- /dev/null
+++ b/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the command buffer helper class.
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/trace_event.h"
+
+namespace gpu {
+
+CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
+ : command_buffer_(command_buffer),
+ ring_buffer_id_(-1),
+ ring_buffer_size_(0),
+ entries_(NULL),
+ total_entry_count_(0),
+ immediate_entry_count_(0),
+ token_(0),
+ put_(0),
+ last_put_sent_(0),
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ commands_issued_(0),
+#endif
+ usable_(true),
+ context_lost_(false),
+ flush_automatically_(true),
+ flush_generation_(0) {
+}
+
+void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
+ flush_automatically_ = enabled;
+ CalcImmediateEntries(0);
+}
+
+bool CommandBufferHelper::IsContextLost() {
+ if (!context_lost_) {
+ context_lost_ = error::IsError(command_buffer()->GetLastError());
+ }
+ return context_lost_;
+}
+
+void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
+ DCHECK_GE(waiting_count, 0);
+
+ // Check if usable & allocated.
+ if (!usable() || !HaveRingBuffer()) {
+ immediate_entry_count_ = 0;
+ return;
+ }
+
+ // Get maximum safe contiguous entries.
+ const int32 curr_get = get_offset();
+ if (curr_get > put_) {
+ immediate_entry_count_ = curr_get - put_ - 1;
+ } else {
+ immediate_entry_count_ =
+ total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
+ }
+
+ // Limit entry count to force early flushing.
+ if (flush_automatically_) {
+ int32 limit =
+ total_entry_count_ /
+ ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);
+
+ int32 pending =
+ (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
+
+ if (pending > 0 && pending >= limit) {
+ // Time to force flush.
+ immediate_entry_count_ = 0;
+ } else {
+ // Limit remaining entries, but not lower than waiting_count entries to
+ // prevent deadlock when command size is greater than the flush limit.
+ limit -= pending;
+ limit = limit < waiting_count ? waiting_count : limit;
+ immediate_entry_count_ =
+ immediate_entry_count_ > limit ? limit : immediate_entry_count_;
+ }
+ }
+}
+
+bool CommandBufferHelper::AllocateRingBuffer() {
+ if (!usable()) {
+ return false;
+ }
+
+ if (HaveRingBuffer()) {
+ return true;
+ }
+
+ int32 id = -1;
+ scoped_refptr<Buffer> buffer =
+ command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
+ if (id < 0) {
+ ClearUsable();
+ return false;
+ }
+
+ ring_buffer_ = buffer;
+ ring_buffer_id_ = id;
+ command_buffer_->SetGetBuffer(id);
+ entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
+ total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
+ // Call to SetGetBuffer(id) above resets get and put offsets to 0.
+ // No need to query it through IPC.
+ put_ = 0;
+ CalcImmediateEntries(0);
+ return true;
+}
+
+void CommandBufferHelper::FreeResources() {
+ if (HaveRingBuffer()) {
+ command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
+ ring_buffer_id_ = -1;
+ CalcImmediateEntries(0);
+ }
+}
+
+void CommandBufferHelper::FreeRingBuffer() {
+ CHECK((put_ == get_offset()) ||
+ error::IsError(command_buffer_->GetLastState().error));
+ FreeResources();
+}
+
+bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
+ ring_buffer_size_ = ring_buffer_size;
+ return AllocateRingBuffer();
+}
+
+CommandBufferHelper::~CommandBufferHelper() {
+ FreeResources();
+}
+
+bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
+ if (!usable()) {
+ return false;
+ }
+ command_buffer_->WaitForGetOffsetInRange(start, end);
+ return command_buffer_->GetLastError() == gpu::error::kNoError;
+}
+
+void CommandBufferHelper::Flush() {
+ // Wrap put_ before flush.
+ if (put_ == total_entry_count_)
+ put_ = 0;
+
+ if (usable() && last_put_sent_ != put_) {
+ last_flush_time_ = base::TimeTicks::Now();
+ last_put_sent_ = put_;
+ command_buffer_->Flush(put_);
+ ++flush_generation_;
+ CalcImmediateEntries(0);
+ }
+}
+
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+void CommandBufferHelper::PeriodicFlushCheck() {
+ base::TimeTicks current_time = base::TimeTicks::Now();
+ if (current_time - last_flush_time_ >
+ base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
+ Flush();
+ }
+}
+#endif
+
+// Calls Flush() and then waits until the buffer is empty. Returns early if an
+// error is set.
+bool CommandBufferHelper::Finish() {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
+ if (!usable()) {
+ return false;
+ }
+ // If there is no work just exit.
+ if (put_ == get_offset()) {
+ return true;
+ }
+ DCHECK(HaveRingBuffer());
+ Flush();
+ if (!WaitForGetOffsetInRange(put_, put_))
+ return false;
+ DCHECK_EQ(get_offset(), put_);
+
+ CalcImmediateEntries(0);
+
+ return true;
+}
+
+// Inserts a new token into the command stream. It uses an increasing value
+// scheme so that we don't lose tokens (a token has passed if the current token
+// value is higher than that token). Calls Finish() if the token value wraps,
+// which will be rare.
+int32 CommandBufferHelper::InsertToken() {
+ AllocateRingBuffer();
+ if (!usable()) {
+ return token_;
+ }
+ DCHECK(HaveRingBuffer());
+ // Increment token as 31-bit integer. Negative values are used to signal an
+ // error.
+ token_ = (token_ + 1) & 0x7FFFFFFF;
+ cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
+ if (cmd) {
+ cmd->Init(token_);
+ if (token_ == 0) {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
+ // we wrapped
+ Finish();
+ DCHECK_EQ(token_, last_token_read());
+ }
+ }
+ return token_;
+}
+
+// Waits until the last read token value is greater than or equal to the value
+// passed as an argument.
+void CommandBufferHelper::WaitForToken(int32 token) {
+ if (!usable() || !HaveRingBuffer()) {
+ return;
+ }
+ // Return immediately if corresponding InsertToken failed.
+ if (token < 0)
+ return;
+ if (token > token_) return; // we wrapped
+ if (last_token_read() >= token)
+ return;
+ Flush();
+ command_buffer_->WaitForTokenInRange(token, token_);
+}
+
+// Waits for available entries, i.e. until get >= put + count + 1. It actually
+// waits for contiguous entries, so it may need to wrap the buffer around,
+// adding noops. Thus this function may change the value of put_. The function
+// returns early if an error occurs, in which case the requested space may not
+// be available.
+void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
+ AllocateRingBuffer();
+ if (!usable()) {
+ return;
+ }
+ DCHECK(HaveRingBuffer());
+ DCHECK(count < total_entry_count_);
+ if (put_ + count > total_entry_count_) {
+ // There's not enough room between the current put and the end of the
+ // buffer, so we need to wrap. We will add noops all the way to the end,
+ // but we need to make sure get wraps first, actually that get is 1 or
+ // more (since put will wrap to 0 after we add the noops).
+ DCHECK_LE(1, put_);
+ int32 curr_get = get_offset();
+ if (curr_get > put_ || curr_get == 0) {
+ TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
+ Flush();
+ if (!WaitForGetOffsetInRange(1, put_))
+ return;
+ curr_get = get_offset();
+ DCHECK_LE(curr_get, put_);
+ DCHECK_NE(0, curr_get);
+ }
+ // Insert Noops to fill out the buffer.
+ int32 num_entries = total_entry_count_ - put_;
+ while (num_entries > 0) {
+ int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
+ cmd::Noop::Set(&entries_[put_], num_to_skip);
+ put_ += num_to_skip;
+ num_entries -= num_to_skip;
+ }
+ put_ = 0;
+ }
+
+ // Try to get 'count' entries without flushing.
+ CalcImmediateEntries(count);
+ if (immediate_entry_count_ < count) {
+ // Try again with a shallow Flush().
+ Flush();
+ CalcImmediateEntries(count);
+ if (immediate_entry_count_ < count) {
+ // Buffer is full. Need to wait for entries.
+ TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
+ if (!WaitForGetOffsetInRange(put_ + count + 1, put_))
+ return;
+ CalcImmediateEntries(count);
+ DCHECK_GE(immediate_entry_count_, count);
+ }
+ }
+}
+
+
+} // namespace gpu
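
A worked example of the auto-flush arithmetic in CalcImmediateEntries() above, using assumed values (illustrative only):

// Assume total_entry_count_ = 1024, last_put_sent_ = 0, put_ = 600, get = 100.
const int total_entry_count = 1024;
const int last_put_sent = 0;
const int put = 600;
const int curr_get = 100;                 // != last_put_sent, so kAutoFlushBig
const int limit = total_entry_count / 2;  // kAutoFlushBig == 2 -> 512
const int pending =
    (put + total_entry_count - last_put_sent) % total_entry_count;  // 600
// pending (600) >= limit (512), so immediate_entry_count_ is forced to 0 and
// the next GetSpace() call flushes before adding more commands.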
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h
new file mode 100644
index 0000000..954107f
--- /dev/null
+++ b/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -0,0 +1,342 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the command buffer helper class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
+
+#include <string.h>
+#include <time.h>
+
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/cmd_buffer_common.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+#if !defined(OS_ANDROID)
+#define CMD_HELPER_PERIODIC_FLUSH_CHECK
+const int kCommandsPerFlushCheck = 100;
+const int kPeriodicFlushDelayInMicroseconds =
+ base::Time::kMicrosecondsPerSecond / (5 * 60);
+#endif
+
+const int kAutoFlushSmall = 16; // 1/16 of the buffer
+const int kAutoFlushBig = 2; // 1/2 of the buffer
+
+// Command buffer helper class. This class simplifies ring buffer management:
+// it will allocate the buffer, give it to the buffer interface, and let the
+// user add commands to it, while taking care of the synchronization (put and
+// get). It also provides a way to ensure commands have been executed, through
+// the token mechanism:
+//
+// helper.AddCommand(...);
+// helper.AddCommand(...);
+// int32 token = helper.InsertToken();
+// helper.AddCommand(...);
+// helper.AddCommand(...);
+// [...]
+//
+// helper.WaitForToken(token); // this doesn't return until the first two
+// // commands have been executed.
+class GPU_EXPORT CommandBufferHelper {
+ public:
+ explicit CommandBufferHelper(CommandBuffer* command_buffer);
+ virtual ~CommandBufferHelper();
+
+ // Initializes the CommandBufferHelper.
+ // Parameters:
+ // ring_buffer_size: The size of the ring buffer portion of the command
+ // buffer.
+ bool Initialize(int32 ring_buffer_size);
+
+ // Sets whether the command buffer should automatically flush periodically
+ // to try to increase performance. Defaults to true.
+ void SetAutomaticFlushes(bool enabled);
+
+ // True if the context is lost.
+ bool IsContextLost();
+
+ // Asynchronously flushes the commands, setting the put pointer to let the
+ // buffer interface know that new commands have been added. After a flush
+ // returns, the command buffer service is aware of all pending commands.
+ void Flush();
+
+ // Waits until all the commands have been executed. Returns whether it
+ // was successful. The function will fail if the command buffer service has
+ // disconnected.
+ bool Finish();
+
+ // Waits until the given number of entries are available.
+ // Parameters:
+ // count: number of entries needed. This value must be at most
+ // the size of the buffer minus one.
+ void WaitForAvailableEntries(int32 count);
+
+ // Inserts a new token into the command buffer. This token either has a value
+ // different from previously inserted tokens, or ensures that previously
+ // inserted tokens with that value have already passed through the command
+ // stream.
+ // Returns:
+ // the value of the new token or -1 if the command buffer reader has
+ // shutdown.
+ int32 InsertToken();
+
+ // Returns true if the token has passed.
+ // Parameters:
+ // the value of the token to check whether it has passed
+ bool HasTokenPassed(int32 token) const {
+ if (token > token_)
+ return true; // we wrapped
+ return last_token_read() >= token;
+ }
+
+ // Waits until the token of a particular value has passed through the command
+ // stream (i.e. commands inserted before that token have been executed).
+ // NOTE: This will call Flush if it needs to block.
+ // Parameters:
+ // the value of the token to wait for.
+ void WaitForToken(int32 token);
+
+ // Called prior to each command being issued. Waits for a certain amount of
+ // space to be available. Returns address of space.
+ void* GetSpace(int32 entries) {
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ // Allow this command buffer to be pre-empted by another if a "reasonable"
+ // amount of work has been done. On highend machines, this reduces the
+ // latency of GPU commands. However, on Android, this can cause the
+ // kernel to thrash between generating GPU commands and executing them.
+ ++commands_issued_;
+ if (flush_automatically_ &&
+ (commands_issued_ % kCommandsPerFlushCheck == 0)) {
+ PeriodicFlushCheck();
+ }
+#endif
+
+ // Test for immediate entries.
+ if (entries > immediate_entry_count_) {
+ WaitForAvailableEntries(entries);
+ if (entries > immediate_entry_count_)
+ return NULL;
+ }
+
+ DCHECK_LE(entries, immediate_entry_count_);
+
+ // Allocate space and advance put_.
+ CommandBufferEntry* space = &entries_[put_];
+ put_ += entries;
+ immediate_entry_count_ -= entries;
+
+ DCHECK_LE(put_, total_entry_count_);
+ return space;
+ }
+
+ template <typename T>
+ void ForceNullCheck(T* data) {
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+ // 64-bit MSVC's alias analysis was determining that the command buffer
+ // entry couldn't be NULL, so it optimized out the NULL check.
+ // Dereferencing the same datatype through a volatile pointer seems to
+ // prevent that from happening. http://crbug.com/361936
+ if (data)
+ static_cast<volatile T*>(data)->header;
+#endif
+ }
+
+ // Typed version of GetSpace. Gets enough room for the given type and returns
+ // a reference to it.
+ template <typename T>
+ T* GetCmdSpace() {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ int32 space_needed = ComputeNumEntries(sizeof(T));
+ T* data = static_cast<T*>(GetSpace(space_needed));
+ ForceNullCheck(data);
+ return data;
+ }
+
+ // Typed version of GetSpace for immediate commands.
+ template <typename T>
+ T* GetImmediateCmdSpace(size_t data_space) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ int32 space_needed = ComputeNumEntries(sizeof(T) + data_space);
+ T* data = static_cast<T*>(GetSpace(space_needed));
+ ForceNullCheck(data);
+ return data;
+ }
+
+ // Typed version of GetSpace for immediate commands.
+ template <typename T>
+ T* GetImmediateCmdSpaceTotalSize(size_t total_space) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ int32 space_needed = ComputeNumEntries(total_space);
+ T* data = static_cast<T*>(GetSpace(space_needed));
+ ForceNullCheck(data);
+ return data;
+ }
+
+ int32 last_token_read() const {
+ return command_buffer_->GetLastToken();
+ }
+
+ int32 get_offset() const {
+ return command_buffer_->GetLastState().get_offset;
+ }
+
+ // Common Commands
+ void Noop(uint32 skip_count) {
+ cmd::Noop* cmd = GetImmediateCmdSpace<cmd::Noop>(
+ (skip_count - 1) * sizeof(CommandBufferEntry));
+ if (cmd) {
+ cmd->Init(skip_count);
+ }
+ }
+
+ void SetToken(uint32 token) {
+ cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
+ if (cmd) {
+ cmd->Init(token);
+ }
+ }
+
+ void SetBucketSize(uint32 bucket_id, uint32 size) {
+ cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
+ if (cmd) {
+ cmd->Init(bucket_id, size);
+ }
+ }
+
+ void SetBucketData(uint32 bucket_id,
+ uint32 offset,
+ uint32 size,
+ uint32 shared_memory_id,
+ uint32 shared_memory_offset) {
+ cmd::SetBucketData* cmd = GetCmdSpace<cmd::SetBucketData>();
+ if (cmd) {
+ cmd->Init(bucket_id,
+ offset,
+ size,
+ shared_memory_id,
+ shared_memory_offset);
+ }
+ }
+
+ void SetBucketDataImmediate(
+ uint32 bucket_id, uint32 offset, const void* data, uint32 size) {
+ cmd::SetBucketDataImmediate* cmd =
+ GetImmediateCmdSpace<cmd::SetBucketDataImmediate>(size);
+ if (cmd) {
+ cmd->Init(bucket_id, offset, size);
+ memcpy(ImmediateDataAddress(cmd), data, size);
+ }
+ }
+
+ void GetBucketStart(uint32 bucket_id,
+ uint32 result_memory_id,
+ uint32 result_memory_offset,
+ uint32 data_memory_size,
+ uint32 data_memory_id,
+ uint32 data_memory_offset) {
+ cmd::GetBucketStart* cmd = GetCmdSpace<cmd::GetBucketStart>();
+ if (cmd) {
+ cmd->Init(bucket_id,
+ result_memory_id,
+ result_memory_offset,
+ data_memory_size,
+ data_memory_id,
+ data_memory_offset);
+ }
+ }
+
+ void GetBucketData(uint32 bucket_id,
+ uint32 offset,
+ uint32 size,
+ uint32 shared_memory_id,
+ uint32 shared_memory_offset) {
+ cmd::GetBucketData* cmd = GetCmdSpace<cmd::GetBucketData>();
+ if (cmd) {
+ cmd->Init(bucket_id,
+ offset,
+ size,
+ shared_memory_id,
+ shared_memory_offset);
+ }
+ }
+
+ CommandBuffer* command_buffer() const {
+ return command_buffer_;
+ }
+
+ scoped_refptr<Buffer> get_ring_buffer() const { return ring_buffer_; }
+
+ uint32 flush_generation() const { return flush_generation_; }
+
+ void FreeRingBuffer();
+
+ bool HaveRingBuffer() const {
+ return ring_buffer_id_ != -1;
+ }
+
+ bool usable() const {
+ return usable_;
+ }
+
+ void ClearUsable() {
+ usable_ = false;
+ CalcImmediateEntries(0);
+ }
+
+ private:
+ // Returns the number of available entries (they may not be contiguous).
+ int32 AvailableEntries() {
+ return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
+ }
+
+ void CalcImmediateEntries(int waiting_count);
+ bool AllocateRingBuffer();
+ void FreeResources();
+
+ // Waits for the get offset to be in a specific range, inclusive. Returns
+ // false if there was an error.
+ bool WaitForGetOffsetInRange(int32 start, int32 end);
+
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ // Calls Flush if automatic flush conditions are met.
+ void PeriodicFlushCheck();
+#endif
+
+ CommandBuffer* command_buffer_;
+ int32 ring_buffer_id_;
+ int32 ring_buffer_size_;
+ scoped_refptr<gpu::Buffer> ring_buffer_;
+ CommandBufferEntry* entries_;
+ int32 total_entry_count_; // the total number of entries
+ int32 immediate_entry_count_;
+ int32 token_;
+ int32 put_;
+ int32 last_put_sent_;
+
+#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
+ int commands_issued_;
+#endif
+
+ bool usable_;
+ bool context_lost_;
+ bool flush_automatically_;
+
+ base::TimeTicks last_flush_time_;
+
+ // Incremented every time the helper flushes the command buffer.
+ // Can be used to track when prior commands have been flushed.
+ uint32 flush_generation_;
+
+ friend class CommandBufferHelperTest;
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CMD_BUFFER_HELPER_H_
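
Beyond the blocking WaitForToken() pattern shown in the class comment above, HasTokenPassed() supports non-blocking fences. A small sketch under assumed usage (illustrative only; FencedResource is hypothetical):

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

// A client-side resource fenced on the command stream: Submit() tags it with a
// token, TryReclaim() later checks, without blocking, whether the service has
// executed past that point.
struct FencedResource {
  int32 fence_token;
  // ... the memory or object being protected ...
};

void Submit(gpu::CommandBufferHelper* helper, FencedResource* resource) {
  // ... queue the commands that read from the resource ...
  resource->fence_token = helper->InsertToken();
}

bool TryReclaim(gpu::CommandBufferHelper* helper,
                const FencedResource& resource) {
  // True once all commands issued before the token have been executed.
  return helper->HasTokenPassed(resource.fence_token);
}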
diff --git a/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
new file mode 100644
index 0000000..6250074
--- /dev/null
+++ b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -0,0 +1,712 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the Command Buffer Helper.
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/linked_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+const int32 kTotalNumCommandEntries = 32;
+const int32 kCommandBufferSizeBytes =
+ kTotalNumCommandEntries * sizeof(CommandBufferEntry);
+const int32 kUnusedCommandId = 5; // we use 0 and 2 currently.
+
+// Override CommandBufferService::Flush() to lock flushing and simulate
+// the buffer becoming full in asynchronous mode.
+class CommandBufferServiceLocked : public CommandBufferService {
+ public:
+ explicit CommandBufferServiceLocked(
+ TransferBufferManagerInterface* transfer_buffer_manager)
+ : CommandBufferService(transfer_buffer_manager),
+ flush_locked_(false),
+ last_flush_(-1),
+ flush_count_(0) {}
+ virtual ~CommandBufferServiceLocked() {}
+
+ virtual void Flush(int32 put_offset) OVERRIDE {
+ flush_count_++;
+ if (!flush_locked_) {
+ last_flush_ = -1;
+ CommandBufferService::Flush(put_offset);
+ } else {
+ last_flush_ = put_offset;
+ }
+ }
+
+ void LockFlush() { flush_locked_ = true; }
+
+ void UnlockFlush() { flush_locked_ = false; }
+
+ int FlushCount() { return flush_count_; }
+
+ virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE {
+ if (last_flush_ != -1) {
+ CommandBufferService::Flush(last_flush_);
+ last_flush_ = -1;
+ }
+ CommandBufferService::WaitForGetOffsetInRange(start, end);
+ }
+
+ private:
+ bool flush_locked_;
+ int last_flush_;
+ int flush_count_;
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferServiceLocked);
+};
+
+// Test fixture for CommandBufferHelper test - Creates a CommandBufferHelper,
+// using a CommandBufferEngine with a mock AsyncAPIInterface for its interface
+// (calling it directly, not through the RPC mechanism).
+class CommandBufferHelperTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(true));
+
+ // Ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, _, _))
+ .WillRepeatedly(Return(error::kNoError));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferServiceLocked(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+
+ test_command_next_id_ = kUnusedCommandId;
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+ test_command_args_.clear();
+ }
+
+ const CommandParser* GetParser() const {
+ return gpu_scheduler_->parser();
+ }
+
+ int32 ImmediateEntryCount() const { return helper_->immediate_entry_count_; }
+
+ // Adds a command to the buffer through the helper, while adding it as an
+ // expected call on the API mock.
+ void AddCommandWithExpect(error::Error _return,
+ unsigned int command,
+ int arg_count,
+ CommandBufferEntry *args) {
+ CommandHeader header;
+ header.size = arg_count + 1;
+ header.command = command;
+ CommandBufferEntry* cmds =
+ static_cast<CommandBufferEntry*>(helper_->GetSpace(arg_count + 1));
+ CommandBufferOffset put = 0;
+ cmds[put++].value_header = header;
+ for (int ii = 0; ii < arg_count; ++ii) {
+ cmds[put++] = args[ii];
+ }
+
+ EXPECT_CALL(*api_mock_, DoCommand(command, arg_count,
+ Truly(AsyncAPIMock::IsArgs(arg_count, args))))
+ .InSequence(sequence_)
+ .WillOnce(Return(_return));
+ }
+
+ void AddUniqueCommandWithExpect(error::Error _return, int cmd_size) {
+ EXPECT_GE(cmd_size, 1);
+ EXPECT_LT(cmd_size, kTotalNumCommandEntries);
+ int arg_count = cmd_size - 1;
+
+ // Allocate array for args.
+ linked_ptr<std::vector<CommandBufferEntry> > args_ptr(
+ new std::vector<CommandBufferEntry>(arg_count ? arg_count : 1));
+
+ for (int32 ii = 0; ii < arg_count; ++ii) {
+ (*args_ptr)[ii].value_uint32 = 0xF00DF00D + ii;
+ }
+
+ // Add command and save args in test_command_args_ until the test completes.
+ AddCommandWithExpect(
+ _return, test_command_next_id_++, arg_count, &(*args_ptr)[0]);
+ test_command_args_.insert(test_command_args_.end(), args_ptr);
+ }
+
+ void TestCommandWrappingFull(int32 cmd_size, int32 start_commands) {
+ const int32 num_args = cmd_size - 1;
+ EXPECT_EQ(kTotalNumCommandEntries % cmd_size, 0);
+
+ std::vector<CommandBufferEntry> args(num_args);
+ for (int32 ii = 0; ii < num_args; ++ii) {
+ args[ii].value_uint32 = ii + 1;
+ }
+
+ // Initially insert commands up to start_commands and Finish().
+ for (int32 ii = 0; ii < start_commands; ++ii) {
+ AddCommandWithExpect(
+ error::kNoError, ii + kUnusedCommandId, num_args, &args[0]);
+ }
+ helper_->Finish();
+
+ EXPECT_EQ(GetParser()->put(),
+ (start_commands * cmd_size) % kTotalNumCommandEntries);
+ EXPECT_EQ(GetParser()->get(),
+ (start_commands * cmd_size) % kTotalNumCommandEntries);
+
+ // Lock flushing to force the buffer to get full.
+ command_buffer_->LockFlush();
+
+ // Add enough commands to overfill the buffer.
+ for (int32 ii = 0; ii < kTotalNumCommandEntries / cmd_size + 2; ++ii) {
+ AddCommandWithExpect(error::kNoError,
+ start_commands + ii + kUnusedCommandId,
+ num_args,
+ &args[0]);
+ }
+
+ // Flush all commands.
+ command_buffer_->UnlockFlush();
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+ }
+
+ // Checks that the buffer from put to put+size is free in the parser.
+ void CheckFreeSpace(CommandBufferOffset put, unsigned int size) {
+ CommandBufferOffset parser_put = GetParser()->put();
+ CommandBufferOffset parser_get = GetParser()->get();
+ CommandBufferOffset limit = put + size;
+ if (parser_get > parser_put) {
+ // "busy" buffer wraps, so "free" buffer is between put (inclusive) and
+ // get (exclusive).
+ EXPECT_LE(parser_put, put);
+ EXPECT_GT(parser_get, limit);
+ } else {
+ // "busy" buffer does not wrap, so the "free" buffer is the top side (from
+ // put to the limit) and the bottom side (from 0 to get).
+ if (put >= parser_put) {
+ // we're on the top side; check that the region stays within the buffer.
+ EXPECT_GE(kTotalNumCommandEntries, limit);
+ } else {
+ // we're on the bottom side, check we are below get.
+ EXPECT_GT(parser_get, limit);
+ }
+ }
+ }
+
+ int32 GetGetOffset() {
+ return command_buffer_->GetLastState().get_offset;
+ }
+
+ int32 GetPutOffset() {
+ return command_buffer_->GetLastState().put_offset;
+ }
+
+ int32 GetHelperGetOffset() { return helper_->get_offset(); }
+
+ int32 GetHelperPutOffset() { return helper_->put_; }
+
+ uint32 GetHelperFlushGeneration() { return helper_->flush_generation(); }
+
+ error::Error GetError() {
+ return command_buffer_->GetLastState().error;
+ }
+
+ CommandBufferOffset get_helper_put() { return helper_->put_; }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferServiceLocked> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ std::list<linked_ptr<std::vector<CommandBufferEntry> > > test_command_args_;
+ unsigned int test_command_next_id_;
+ Sequence sequence_;
+};
+
+// Checks immediate_entry_count_ changes based on 'usable' state.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNotUsable) {
+ // Auto flushing mode is tested separately.
+ helper_->SetAutomaticFlushes(false);
+ EXPECT_EQ(helper_->usable(), true);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+ helper_->ClearUsable();
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+}
+
+// Checks immediate_entry_count_ changes based on RingBuffer state.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNoRingBuffer) {
+ helper_->SetAutomaticFlushes(false);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+ helper_->FreeRingBuffer();
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+}
+
+// Checks immediate_entry_count_ calc when Put >= Get and Get == 0.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetAtZero) {
+ // No internal auto flushing.
+ helper_->SetAutomaticFlushes(false);
+ command_buffer_->LockFlush();
+
+ // Start at Get = Put = 0.
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+ EXPECT_EQ(GetHelperGetOffset(), 0);
+
+ // Immediate count should be 1 less than the end of the buffer.
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 3);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when Put >= Get and Get > 0.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetInMiddle) {
+ // No internal auto flushing.
+ helper_->SetAutomaticFlushes(false);
+ command_buffer_->LockFlush();
+
+ // Move to Get = Put = 2.
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ helper_->Finish();
+ EXPECT_EQ(GetHelperPutOffset(), 2);
+ EXPECT_EQ(GetHelperGetOffset(), 2);
+
+ // Immediate count should be up to the end of the buffer.
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 2);
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 4);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when Put < Get.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetBeforePut) {
+ // Move to Get = kTotalNumCommandEntries / 4, Put = 0.
+ const int kInitGetOffset = kTotalNumCommandEntries / 4;
+ helper_->SetAutomaticFlushes(false);
+ command_buffer_->LockFlush();
+ AddUniqueCommandWithExpect(error::kNoError, kInitGetOffset);
+ helper_->Finish();
+ AddUniqueCommandWithExpect(error::kNoError,
+ kTotalNumCommandEntries - kInitGetOffset);
+
+ // Using Flush() instead of Finish() lets Put wrap without the command buffer
+ // immediately processing the data between Get and Put.
+ helper_->Flush();
+
+ EXPECT_EQ(GetHelperGetOffset(), kInitGetOffset);
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+
+ // Immediate count should be up to Get - 1.
+ EXPECT_EQ(ImmediateEntryCount(), kInitGetOffset - 1);
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ EXPECT_EQ(ImmediateEntryCount(), kInitGetOffset - 3);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when automatic flushing is enabled.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesAutoFlushing) {
+ command_buffer_->LockFlush();
+
+ // Start at Get = Put = 0.
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+ EXPECT_EQ(GetHelperGetOffset(), 0);
+
+ // Without auto flushes, up to kTotalNumCommandEntries - 1 is available.
+ helper_->SetAutomaticFlushes(false);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1);
+
+ // With auto flushes, and Get == Last Put,
+ // up to kTotalNumCommandEntries / kAutoFlushSmall is available.
+ helper_->SetAutomaticFlushes(true);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushSmall);
+
+ // With auto flushes, and Get != Last Put,
+ // up to kTotalNumCommandEntries / kAutoFlushBig is available.
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ helper_->Flush();
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushBig);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks immediate_entry_count_ calc when automatic flushing is enabled, and
+// we allocate commands over the immediate_entry_count_ size.
+TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesOverFlushLimit) {
+ // Lock internal flushing.
+ command_buffer_->LockFlush();
+
+ // Start at Get = Put = 0.
+ EXPECT_EQ(GetHelperPutOffset(), 0);
+ EXPECT_EQ(GetHelperGetOffset(), 0);
+
+ // Pre-check ImmediateEntryCount is limited with automatic flushing enabled.
+ helper_->SetAutomaticFlushes(true);
+ EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushSmall);
+
+ // Add a command larger than ImmediateEntryCount().
+ AddUniqueCommandWithExpect(error::kNoError, ImmediateEntryCount() + 1);
+
+ // ImmediateEntryCount() should now be 0, to force a flush check on the next
+ // command.
+ EXPECT_EQ(ImmediateEntryCount(), 0);
+
+ // Add a command when ImmediateEntryCount() == 0.
+ AddUniqueCommandWithExpect(error::kNoError, ImmediateEntryCount() + 1);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks that commands in the buffer are properly executed, and that the
+// status/error stay valid.
+TEST_F(CommandBufferHelperTest, TestCommandProcessing) {
+ // Check initial state of the engine - it should have been configured by the
+ // helper.
+ EXPECT_TRUE(GetParser() != NULL);
+ EXPECT_EQ(error::kNoError, GetError());
+ EXPECT_EQ(0, GetGetOffset());
+
+ // Add 3 commands through the helper
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 0, NULL);
+
+ CommandBufferEntry args1[2];
+ args1[0].value_uint32 = 3;
+ args1[1].value_float = 4.f;
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args1);
+
+ CommandBufferEntry args2[2];
+ args2[0].value_uint32 = 5;
+ args2[1].value_float = 6.f;
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args2);
+
+ // Wait until it's done.
+ helper_->Finish();
+ // Check that the engine has no more work to do.
+ EXPECT_TRUE(GetParser()->IsEmpty());
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks that commands in the buffer are properly executed when wrapping the
+// buffer, and that the status/error stay valid.
+TEST_F(CommandBufferHelperTest, TestCommandWrapping) {
+ // Add kNumCommands commands of size 3 through the helper to make sure we
+ // do wrap. kTotalNumCommandEntries must not be a multiple of 3.
+ COMPILE_ASSERT(kTotalNumCommandEntries % 3 != 0,
+ Is_multiple_of_num_command_entries);
+ const int kNumCommands = (kTotalNumCommandEntries / 3) * 2;
+ CommandBufferEntry args1[2];
+ args1[0].value_uint32 = 5;
+ args1[1].value_float = 4.f;
+
+ for (int i = 0; i < kNumCommands; ++i) {
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + i, 2, args1);
+ }
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks the case where the command inserted exactly matches the space left in
+// the command buffer.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingExactMultiple) {
+ const int32 kCommandSize = kTotalNumCommandEntries / 2;
+ const size_t kNumArgs = kCommandSize - 1;
+ COMPILE_ASSERT(kTotalNumCommandEntries % kCommandSize == 0,
+ Not_multiple_of_num_command_entries);
+ CommandBufferEntry args1[kNumArgs];
+ for (size_t ii = 0; ii < kNumArgs; ++ii) {
+ args1[ii].value_uint32 = ii + 1;
+ }
+
+ for (unsigned int i = 0; i < 5; ++i) {
+ AddCommandWithExpect(
+ error::kNoError, i + kUnusedCommandId, kNumArgs, args1);
+ }
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks exact wrapping condition with Get = 0.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingFullAtStart) {
+ TestCommandWrappingFull(2, 0);
+}
+
+// Checks exact wrapping condition with 0 < Get < kTotalNumCommandEntries.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingFullInMiddle) {
+ TestCommandWrappingFull(2, 1);
+}
+
+// Checks exact wrapping condition with Get = kTotalNumCommandEntries.
+// Get should wrap back to 0; this test makes sure it does.
+TEST_F(CommandBufferHelperTest, TestCommandWrappingFullAtEnd) {
+ TestCommandWrappingFull(2, kTotalNumCommandEntries / 2);
+}
+
+// Checks that asking for available entries works, and that the parser
+// actually won't use that space.
+TEST_F(CommandBufferHelperTest, TestAvailableEntries) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Add 4 commands through the helper - 8 entries total.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 1, 0, NULL);
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 2, 0, NULL);
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 3, 2, args);
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 4, 2, args);
+
+ // Ask for 5 entries.
+ helper_->WaitForAvailableEntries(5);
+
+ CommandBufferOffset put = get_helper_put();
+ CheckFreeSpace(put, 5);
+
+ // Add more commands.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 5, 2, args);
+
+ // Wait until everything is done.
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks that the InsertToken/WaitForToken work.
+TEST_F(CommandBufferHelperTest, TestToken) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Add a first command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 3, 2, args);
+ // keep track of the buffer position.
+ CommandBufferOffset command1_put = get_helper_put();
+ int32 token = helper_->InsertToken();
+
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+ // Add another command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 4, 2, args);
+ helper_->WaitForToken(token);
+ // check that the get pointer is beyond the first command.
+ EXPECT_LE(command1_put, GetGetOffset());
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+// Checks WaitForToken doesn't Flush if token is already read.
+TEST_F(CommandBufferHelperTest, TestWaitForTokenFlush) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Add a first command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 3, 2, args);
+ int32 token = helper_->InsertToken();
+
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ int flush_count = command_buffer_->FlushCount();
+
+ // Test that waiting for pending token causes a Flush.
+ helper_->WaitForToken(token);
+ EXPECT_EQ(command_buffer_->FlushCount(), flush_count + 1);
+
+ // Test that we don't Flush repeatedly.
+ helper_->WaitForToken(token);
+ EXPECT_EQ(command_buffer_->FlushCount(), flush_count + 1);
+
+ // Add another command.
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId + 4, 2, args);
+
+ // Test that we don't Flush repeatedly even if commands are pending.
+ helper_->WaitForToken(token);
+ EXPECT_EQ(command_buffer_->FlushCount(), flush_count + 1);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+
+ // Test freeing ring buffer.
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+
+ // Test that InsertToken allocates a new one
+ int32 token = helper_->InsertToken();
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+ helper_->WaitForToken(token);
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+
+ // Test that WaitForAvailableEntries allocates a new one
+ AddCommandWithExpect(error::kNoError, kUnusedCommandId, 0, NULL);
+ EXPECT_TRUE(helper_->HaveRingBuffer());
+ helper_->Finish();
+ helper_->FreeRingBuffer();
+ EXPECT_FALSE(helper_->HaveRingBuffer());
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+}
+
+TEST_F(CommandBufferHelperTest, Noop) {
+ for (int ii = 1; ii < 4; ++ii) {
+ CommandBufferOffset put_before = get_helper_put();
+ helper_->Noop(ii);
+ CommandBufferOffset put_after = get_helper_put();
+ EXPECT_EQ(ii, put_after - put_before);
+ }
+}
+
+TEST_F(CommandBufferHelperTest, IsContextLost) {
+ EXPECT_FALSE(helper_->IsContextLost());
+ command_buffer_->SetParseError(error::kGenericError);
+ EXPECT_TRUE(helper_->IsContextLost());
+}
+
+// Checks helper's 'flush generation' updates.
+TEST_F(CommandBufferHelperTest, TestFlushGeneration) {
+ // Explicit flushing only.
+ helper_->SetAutomaticFlushes(false);
+
+ // Generation should change after Flush() but not before.
+ uint32 gen1, gen2, gen3;
+
+ gen1 = GetHelperFlushGeneration();
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ gen2 = GetHelperFlushGeneration();
+ helper_->Flush();
+ gen3 = GetHelperFlushGeneration();
+ EXPECT_EQ(gen2, gen1);
+ EXPECT_NE(gen3, gen2);
+
+ // Generation should change after Finish() but not before.
+ gen1 = GetHelperFlushGeneration();
+ AddUniqueCommandWithExpect(error::kNoError, 2);
+ gen2 = GetHelperFlushGeneration();
+ helper_->Finish();
+ gen3 = GetHelperFlushGeneration();
+ EXPECT_EQ(gen2, gen1);
+ EXPECT_NE(gen3, gen2);
+
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_EQ(error::kNoError, GetError());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/context_support.h b/gpu/command_buffer/client/context_support.h
new file mode 100644
index 0000000..2678ba9
--- /dev/null
+++ b/gpu/command_buffer/client/context_support.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CONTEXT_SUPPORT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CONTEXT_SUPPORT_H_
+
+#include "base/callback.h"
+#include "ui/gfx/overlay_transform.h"
+#include "ui/gfx/rect.h"
+
+namespace gpu {
+
+class ContextSupport {
+ public:
+ // Runs |callback| when a sync point is reached.
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) = 0;
+
+ // Runs |callback| when a query created via glCreateQueryEXT() has cleared
+ // past the glEndQueryEXT() point.
+ virtual void SignalQuery(uint32 query, const base::Closure& callback) = 0;
+
+ // For onscreen contexts, indicates that the surface visibility has changed.
+ // Clients aren't expected to draw to an invisible surface.
+ virtual void SetSurfaceVisible(bool visible) = 0;
+
+ virtual void Swap() = 0;
+ virtual void PartialSwapBuffers(const gfx::Rect& sub_buffer) = 0;
+
+ // Schedule a texture to be presented as an overlay synchronously with the
+ // primary surface during the next buffer swap.
+ // This method is not stateful and needs to be re-scheduled every frame.
+ virtual void ScheduleOverlayPlane(int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect) = 0;
+
+ virtual uint32 InsertFutureSyncPointCHROMIUM() = 0;
+ virtual void RetireSyncPointCHROMIUM(uint32 sync_point) = 0;
+
+ protected:
+ ContextSupport() {}
+ virtual ~ContextSupport() {}
+};
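+
+// A minimal usage sketch (illustration only, not part of the interface):
+// assuming a ContextSupport* |context_support| and a hypothetical no-argument
+// free function OnQueryAvailable(), a client could request a notification with
+//
+//   context_support->SignalQuery(query_id, base::Bind(&OnQueryAvailable));
+//
+// where |query_id| is the id used with glCreateQueryEXT()/glEndQueryEXT().
+// The callback runs once the service side has cleared past the
+// glEndQueryEXT() point for that query.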
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CONTEXT_SUPPORT_H_
diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
new file mode 100644
index 0000000..8003857
--- /dev/null
+++ b/gpu/command_buffer/client/fenced_allocator.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the FencedAllocator class.
+
+#include "gpu/command_buffer/client/fenced_allocator.h"
+
+#include <algorithm>
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+namespace {
+
+// Allocation alignment, must be a power of two.
+const unsigned int kAllocAlignment = 16;
+
+// Round down to the largest multiple of kAllocAlignment no greater than |size|.
+unsigned int RoundDown(unsigned int size) {
+ return size & ~(kAllocAlignment - 1);
+}
+
+// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
+unsigned int RoundUp(unsigned int size) {
+ return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
+}
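+
+// For example, with kAllocAlignment == 16: RoundDown(17) == 16, RoundUp(17)
+// == 32, and already-aligned values such as 32 are returned unchanged by both.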
+
+} // namespace
+
+#ifndef _MSC_VER
+const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
+#endif
+
+FencedAllocator::FencedAllocator(unsigned int size,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback)
+ : helper_(helper),
+ poll_callback_(poll_callback),
+ bytes_in_use_(0) {
+ Block block = { FREE, 0, RoundDown(size), kUnusedToken };
+ blocks_.push_back(block);
+}
+
+FencedAllocator::~FencedAllocator() {
+ // Free blocks pending tokens.
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ if (blocks_[i].state == FREE_PENDING_TOKEN) {
+ i = WaitForTokenAndFreeBlock(i);
+ }
+ }
+
+ DCHECK_EQ(blocks_.size(), 1u);
+ DCHECK_EQ(blocks_[0].state, FREE);
+}
+
+// Looks for a non-allocated block that is big enough. Searches the FREE
+// blocks first (for direct use), first-fit, then the FREE_PENDING_TOKEN
+// blocks, waiting for them. The current implementation isn't smart about
+// choosing what to wait for; it just walks the blocks in order (first-fit
+// as well).
+FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
+ // size of 0 is not allowed because it would be inconsistent to only sometimes
+ // have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
+ if (size == 0) {
+ return kInvalidOffset;
+ }
+
+ // Round up the allocation size to ensure alignment.
+ size = RoundUp(size);
+
+ // Try first to allocate in a free block.
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == FREE && block.size >= size) {
+ return AllocInBlock(i, size);
+ }
+ }
+
+ // No free block is available. Look for blocks pending tokens, and wait for
+ // them to be re-usable.
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ if (blocks_[i].state != FREE_PENDING_TOKEN)
+ continue;
+ i = WaitForTokenAndFreeBlock(i);
+ if (blocks_[i].size >= size)
+ return AllocInBlock(i, size);
+ }
+ return kInvalidOffset;
+}
+
+// Looks for the corresponding block, marks it FREE, and collapses it if
+// necessary.
+void FencedAllocator::Free(FencedAllocator::Offset offset) {
+ BlockIndex index = GetBlockByOffset(offset);
+ DCHECK_NE(blocks_[index].state, FREE);
+ Block &block = blocks_[index];
+
+ if (block.state == IN_USE)
+ bytes_in_use_ -= block.size;
+
+ block.state = FREE;
+ CollapseFreeBlock(index);
+}
+
+// Looks for the corresponding block and marks it FREE_PENDING_TOKEN.
+void FencedAllocator::FreePendingToken(
+ FencedAllocator::Offset offset, int32 token) {
+ BlockIndex index = GetBlockByOffset(offset);
+ Block &block = blocks_[index];
+ if (block.state == IN_USE)
+ bytes_in_use_ -= block.size;
+ block.state = FREE_PENDING_TOKEN;
+ block.token = token;
+}
+
+// Gets the size of the largest block marked as free.
+unsigned int FencedAllocator::GetLargestFreeSize() {
+ FreeUnused();
+ unsigned int max_size = 0;
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == FREE)
+ max_size = std::max(max_size, block.size);
+ }
+ return max_size;
+}
+
+// Gets the size of the largest segment of blocks that are either FREE or
+// FREE_PENDING_TOKEN.
+unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
+ unsigned int max_size = 0;
+ unsigned int current_size = 0;
+ for (unsigned int i = 0; i < blocks_.size(); ++i) {
+ Block &block = blocks_[i];
+ if (block.state == IN_USE) {
+ max_size = std::max(max_size, current_size);
+ current_size = 0;
+ } else {
+ DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
+ current_size += block.size;
+ }
+ }
+ return std::max(max_size, current_size);
+}
+
+// Makes sure that:
+// - there is at least one block.
+// - there are no contiguous FREE blocks (they should have been collapsed).
+// - the successive offsets match the block sizes, and they are in order.
+bool FencedAllocator::CheckConsistency() {
+ if (blocks_.size() < 1) return false;
+ for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
+ Block &current = blocks_[i];
+ Block &next = blocks_[i + 1];
+ // This test is NOT included in the next one, because offset is unsigned.
+ if (next.offset <= current.offset)
+ return false;
+ if (next.offset != current.offset + current.size)
+ return false;
+ if (current.state == FREE && next.state == FREE)
+ return false;
+ }
+ return true;
+}
+
+// Returns false if all blocks are FREE (in which case they would have been
+// coalesced into one block); returns true otherwise.
+bool FencedAllocator::InUse() {
+ return blocks_.size() != 1 || blocks_[0].state != FREE;
+}
+
+// Collapses the block with the next one, then with the previous one. Provided
+// the structure is consistent, those are the only blocks eligible for collapse.
+FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
+ BlockIndex index) {
+ if (index + 1 < blocks_.size()) {
+ Block &next = blocks_[index + 1];
+ if (next.state == FREE) {
+ blocks_[index].size += next.size;
+ blocks_.erase(blocks_.begin() + index + 1);
+ }
+ }
+ if (index > 0) {
+ Block &prev = blocks_[index - 1];
+ if (prev.state == FREE) {
+ prev.size += blocks_[index].size;
+ blocks_.erase(blocks_.begin() + index);
+ --index;
+ }
+ }
+ return index;
+}
+
+// Waits for the block's token, then marks the block as free and collapses it.
+FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
+ BlockIndex index) {
+ Block &block = blocks_[index];
+ DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
+ helper_->WaitForToken(block.token);
+ block.state = FREE;
+ return CollapseFreeBlock(index);
+}
+
+// Frees any blocks pending a token for which the token has been read.
+void FencedAllocator::FreeUnused() {
+ // Free any blocks whose lifetime is handled outside the allocator.
+ poll_callback_.Run();
+
+ for (unsigned int i = 0; i < blocks_.size();) {
+ Block& block = blocks_[i];
+ if (block.state == FREE_PENDING_TOKEN &&
+ helper_->HasTokenPassed(block.token)) {
+ block.state = FREE;
+ i = CollapseFreeBlock(i);
+ } else {
+ ++i;
+ }
+ }
+}
+
+// If the block is exactly the requested size, simply mark it IN_USE, otherwise
+// split it and mark the first one (of the requested size) IN_USE.
+FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
+ unsigned int size) {
+ Block &block = blocks_[index];
+ DCHECK_GE(block.size, size);
+ DCHECK_EQ(block.state, FREE);
+ Offset offset = block.offset;
+ bytes_in_use_ += size;
+ if (block.size == size) {
+ block.state = IN_USE;
+ return offset;
+ }
+ Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+ block.state = IN_USE;
+ block.size = size;
+ // This is done last because it may invalidate |block|.
+ blocks_.insert(blocks_.begin() + index + 1, newblock);
+ return offset;
+}
+
+// The blocks are in offset order, so we can do a binary search.
+FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
+ Block templ = { IN_USE, offset, 0, kUnusedToken };
+ Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
+ templ, OffsetCmp());
+ DCHECK(it != blocks_.end() && it->offset == offset);
+ return it-blocks_.begin();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/fenced_allocator.h b/gpu/command_buffer/client/fenced_allocator.h
new file mode 100644
index 0000000..8e222e1
--- /dev/null
+++ b/gpu/command_buffer/client/fenced_allocator.h
@@ -0,0 +1,266 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the FencedAllocator class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_FENCED_ALLOCATOR_H_
+#define GPU_COMMAND_BUFFER_CLIENT_FENCED_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+class CommandBufferHelper;
+
+// FencedAllocator provides a mechanism to manage allocations within a fixed
+// block of memory (storing the book-keeping externally). Furthermore, this
+// class allows freeing data "pending" the passage of a command buffer token,
+// that is, the memory won't be reused until the command buffer has processed
+// that token.
+//
+// NOTE: Although this class is intended to be used in the command buffer
+// environment which is multi-process, this class isn't "thread safe", because
+// it isn't meant to be shared across modules. It is thread-compatible though
+// (see http://www.corp.google.com/eng/doc/cpp_primer.html#thread_safety).
+class GPU_EXPORT FencedAllocator {
+ public:
+ typedef unsigned int Offset;
+ // Invalid offset, returned by Alloc in case of failure.
+ static const Offset kInvalidOffset = 0xffffffffU;
+
+ // Creates a FencedAllocator. Note that the size of the buffer is passed, but
+ // not its base address: everything is handled as offsets into the buffer.
+ FencedAllocator(unsigned int size,
+ CommandBufferHelper *helper,
+ const base::Closure& poll_callback);
+
+ ~FencedAllocator();
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the offset of the allocated memory block, or kInvalidOffset if out of
+ // memory.
+ Offset Alloc(unsigned int size);
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // offset: the offset of the memory block to free.
+ void Free(Offset offset);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // offset: the offset of the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(Offset offset, int32 token);
+
+ // Frees any blocks pending a token for which the token has been read.
+ void FreeUnused();
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSize();
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait. Allocating a block of this size will succeed, but may
+ // block.
+ unsigned int GetLargestFreeOrPendingSize();
+
+ // Checks for consistency inside the book-keeping structures. Used for
+ // testing.
+ bool CheckConsistency();
+
+ // True if any memory is allocated.
+ bool InUse();
+
+ // Returns the number of bytes of memory that are IN_USE.
+ size_t bytes_in_use() const { return bytes_in_use_; }
+
+ private:
+ // Status of a block of memory, for book-keeping.
+ enum State {
+ IN_USE,
+ FREE,
+ FREE_PENDING_TOKEN
+ };
+
+ // Book-keeping structure that describes a block of memory.
+ struct Block {
+ State state;
+ Offset offset;
+ unsigned int size;
+ int32_t token; // token to wait for in the FREE_PENDING_TOKEN case.
+ };
+
+ // Comparison functor for memory block sorting.
+ class OffsetCmp {
+ public:
+ bool operator() (const Block &left, const Block &right) {
+ return left.offset < right.offset;
+ }
+ };
+
+ typedef std::vector<Block> Container;
+ typedef unsigned int BlockIndex;
+
+ static const int32_t kUnusedToken = 0;
+
+ // Gets the index of a memory block, given its offset.
+ BlockIndex GetBlockByOffset(Offset offset);
+
+ // Collapse a free block with its neighbours if they are free. Returns the
+ // index of the collapsed block.
+ // NOTE: this will invalidate block indices.
+ BlockIndex CollapseFreeBlock(BlockIndex index);
+
+ // Waits for a FREE_PENDING_TOKEN block to be usable, and free it. Returns
+ // the new index of that block (since it may have been collapsed).
+ // NOTE: this will invalidate block indices.
+ BlockIndex WaitForTokenAndFreeBlock(BlockIndex index);
+
+ // Allocates a block of memory inside a given block, splitting it in two
+ // (unless that block is of the exact requested size).
+ // NOTE: this will invalidate block indices.
+ // Returns the offset of the allocated block (NOTE: this is different from
+ // the other functions that return a block index).
+ Offset AllocInBlock(BlockIndex index, unsigned int size);
+
+ CommandBufferHelper *helper_;
+ base::Closure poll_callback_;
+ Container blocks_;
+ size_t bytes_in_use_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocator);
+};
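+
+// A minimal usage sketch (illustration only, mirroring the pattern used by the
+// unit tests): |helper| is an existing CommandBufferHelper* and NoOpPoll() is
+// a hypothetical no-op poll callback.
+//
+//   FencedAllocator allocator(buffer_size, helper, base::Bind(&NoOpPoll));
+//   FencedAllocator::Offset offset = allocator.Alloc(64);
+//   if (offset != FencedAllocator::kInvalidOffset) {
+//     // ... issue commands that read the 64 bytes at |offset| ...
+//     allocator.FreePendingToken(offset, helper->InsertToken());
+//   }
+//
+// The freed range is not handed out again until the helper has seen the
+// inserted token pass.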
+
+// This class functions just like FencedAllocator, but its API uses pointers
+// instead of offsets.
+class FencedAllocatorWrapper {
+ public:
+ FencedAllocatorWrapper(unsigned int size,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
+ void* base)
+ : allocator_(size, helper, poll_callback),
+ base_(base) { }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ void *Alloc(unsigned int size) {
+ FencedAllocator::Offset offset = allocator_.Alloc(size);
+ return GetPointer(offset);
+ }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ // This is a type-safe version of Alloc, returning a typed pointer.
+ //
+ // Parameters:
+ // count: the number of elements to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ template <typename T> T *AllocTyped(unsigned int count) {
+ return static_cast<T *>(Alloc(count * sizeof(T)));
+ }
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ void Free(void *pointer) {
+ DCHECK(pointer);
+ allocator_.Free(GetOffset(pointer));
+ }
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void *pointer, int32 token) {
+ DCHECK(pointer);
+ allocator_.FreePendingToken(GetOffset(pointer), token);
+ }
+
+ // Frees any blocks pending a token for which the token has been read.
+ void FreeUnused() {
+ allocator_.FreeUnused();
+ }
+
+ // Gets a pointer to a memory block given the base memory and the offset.
+ // It translates FencedAllocator::kInvalidOffset to NULL.
+ void *GetPointer(FencedAllocator::Offset offset) {
+ return (offset == FencedAllocator::kInvalidOffset) ?
+ NULL : static_cast<char *>(base_) + offset;
+ }
+
+ // Gets the offset to a memory block given the base memory and the address.
+ // It translates NULL to FencedAllocator::kInvalidOffset.
+ FencedAllocator::Offset GetOffset(void *pointer) {
+ return pointer ?
+ static_cast<FencedAllocator::Offset>(
+ static_cast<char*>(pointer) - static_cast<char*>(base_)) :
+ FencedAllocator::kInvalidOffset;
+ }
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSize() {
+ return allocator_.GetLargestFreeSize();
+ }
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait.
+ unsigned int GetLargestFreeOrPendingSize() {
+ return allocator_.GetLargestFreeOrPendingSize();
+ }
+
+ // Checks for consistency inside the book-keeping structures. Used for
+ // testing.
+ bool CheckConsistency() {
+ return allocator_.CheckConsistency();
+ }
+
+ // True if any memory is allocated.
+ bool InUse() {
+ return allocator_.InUse();
+ }
+
+ FencedAllocator &allocator() { return allocator_; }
+
+ size_t bytes_in_use() const { return allocator_.bytes_in_use(); }
+
+ private:
+ FencedAllocator allocator_;
+ void* base_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocatorWrapper);
+};
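+
+// A minimal usage sketch (illustration only): |size|, |helper|, |poll_callback|
+// and |base| are assumed to exist, with |base| pointing at the start of the
+// underlying |size|-byte buffer.
+//
+//   FencedAllocatorWrapper wrapper(size, helper, poll_callback, base);
+//   uint32_t* data = wrapper.AllocTyped<uint32_t>(4);  // 4 * sizeof(uint32_t)
+//   if (data) {
+//     // ... fill |data| and issue commands that consume it ...
+//     wrapper.FreePendingToken(data, helper->InsertToken());
+//   }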
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_FENCED_ALLOCATOR_H_
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc
new file mode 100644
index 0000000..e746be6
--- /dev/null
+++ b/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -0,0 +1,645 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the tests for the FencedAllocator class.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/aligned_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/client/fenced_allocator.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::_;
+
+class BaseFencedAllocatorTest : public testing::Test {
+ protected:
+ static const unsigned int kBufferSize = 1024;
+ static const int kAllocAlignment = 16;
+
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(true));
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kBufferSize);
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+};
+
+#ifndef _MSC_VER
+const unsigned int BaseFencedAllocatorTest::kBufferSize;
+#endif
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken calls are properly forwarded to the engine.
+class FencedAllocatorTest : public BaseFencedAllocatorTest {
+ protected:
+ virtual void SetUp() {
+ BaseFencedAllocatorTest::SetUp();
+ allocator_.reset(new FencedAllocator(kBufferSize,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+
+ scoped_ptr<FencedAllocator> allocator_;
+};
+
+// Checks basic alloc and free.
+TEST_F(FencedAllocatorTest, TestBasic) {
+ allocator_->CheckConsistency();
+ EXPECT_FALSE(allocator_->InUse());
+
+ const unsigned int kSize = 16;
+ FencedAllocator::Offset offset = allocator_->Alloc(kSize);
+ EXPECT_TRUE(allocator_->InUse());
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_GE(kBufferSize, offset+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(offset);
+ EXPECT_FALSE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Test alloc 0 fails.
+TEST_F(FencedAllocatorTest, TestAllocZero) {
+ FencedAllocator::Offset offset = allocator_->Alloc(0);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_FALSE(allocator_->InUse());
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Checks out-of-memory condition.
+TEST_F(FencedAllocatorTest, TestOutOfMemory) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ FencedAllocator::Offset offsets[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ offsets[i] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+ EXPECT_GE(kBufferSize, offsets[i]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, reallocate with half the size
+ allocator_->Free(offsets[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ offsets[0] = allocator_->Alloc(kSize/2);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
+ EXPECT_GE(kBufferSize, offsets[0]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // This allocation should fail as well.
+ offset_failed = allocator_->Alloc(kSize);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(offsets[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(FencedAllocatorTest, TestFreePendingToken) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ FencedAllocator::Offset offsets[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ offsets[i] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+ EXPECT_GE(kBufferSize, offsets[i]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
+ EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed.
+ offsets[0] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
+ EXPECT_GE(kBufferSize, offsets[0]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(offsets[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism using FreeUnused
+TEST_F(FencedAllocatorTest, FreeUnused) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ FencedAllocator::Offset offsets[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ offsets[i] = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+ EXPECT_GE(kBufferSize, offsets[i]+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+ EXPECT_TRUE(allocator_->InUse());
+
+ // No memory should be available.
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Force the command buffer to process the token.
+ helper_->Finish();
+
+ // Tell the allocator to update what's available based on the current token.
+ allocator_->FreeUnused();
+
+ // Check that the new largest free size takes into account the unused block.
+ EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());
+
+ // Free two more.
+ token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[1], token);
+ token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offsets[2], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Check that nothing has changed.
+ EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());
+
+ // Force the command buffer to process the token.
+ helper_->Finish();
+
+ // Tell the allocator to update what's available based on the current token.
+ allocator_->FreeUnused();
+
+ // Check that the new largest free size takes into account the unused blocks.
+ EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
+ EXPECT_TRUE(allocator_->InUse());
+
+ // Free up everything.
+ for (unsigned int i = 3; i < kAllocCount; ++i) {
+ allocator_->Free(offsets[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+ EXPECT_FALSE(allocator_->InUse());
+}
+
+// Tests GetLargestFreeSize
+TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
+
+ FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+ allocator_->Free(offset);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
+
+ const unsigned int kSize = 16;
+ offset = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ // The following checks that the buffer is allocated "smartly" - which is
+ // dependent on the implementation. But either first-fit or best-fit would
+ // ensure that.
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());
+
+ // Allocate 2 more buffers (now 3), and then free the first two. This is to
+ // ensure a hole. Note that this is dependent on the first-fit current
+ // implementation.
+ FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
+ FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
+ allocator_->Free(offset);
+ allocator_->Free(offset1);
+ EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());
+
+ offset = allocator_->Alloc(kBufferSize - 3 * kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());
+
+ offset1 = allocator_->Alloc(2 * kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+
+ allocator_->Free(offset);
+ allocator_->Free(offset1);
+ allocator_->Free(offset2);
+}
+
+// Tests GetLargestFreeOrPendingSize
+TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+
+ FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
+ allocator_->Free(offset);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+
+ const unsigned int kSize = 16;
+ offset = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ // The following checks that the buffer is allocated "smartly" - which is
+ // dependent on the implementation. But either first-fit or best-fit would
+ // ensure that.
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());
+
+ // Allocate 2 more buffers (now 3), and then free the first two. This is to
+ // ensure a hole. Note that this is dependent on the first-fit current
+ // implementation.
+ FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
+ FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
+ allocator_->Free(offset);
+ allocator_->Free(offset1);
+ EXPECT_EQ(kBufferSize - 3 * kSize,
+ allocator_->GetLargestFreeOrPendingSize());
+
+ // Free the last one, pending a token.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offset2, token);
+
+ // Now all the buffers have been freed...
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ // ... but one is still waiting for the token.
+ EXPECT_EQ(kBufferSize - 3 * kSize,
+ allocator_->GetLargestFreeSize());
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed, but it will succeed.
+ offset = allocator_->Alloc(kBufferSize);
+ ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+ allocator_->Free(offset);
+
+ // Everything now has been freed...
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ // ... for real.
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
+}
+
+class FencedAllocatorPollTest : public BaseFencedAllocatorTest {
+ public:
+ static const unsigned int kAllocSize = 128;
+
+ MOCK_METHOD0(MockedPoll, void());
+
+ protected:
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+};
+
+TEST_F(FencedAllocatorPollTest, TestPoll) {
+ scoped_ptr<FencedAllocator> allocator(
+ new FencedAllocator(kBufferSize,
+ helper_.get(),
+ base::Bind(&FencedAllocatorPollTest::MockedPoll,
+ base::Unretained(this))));
+
+ FencedAllocator::Offset mem1 = allocator->Alloc(kAllocSize);
+ FencedAllocator::Offset mem2 = allocator->Alloc(kAllocSize);
+ EXPECT_NE(mem1, FencedAllocator::kInvalidOffset);
+ EXPECT_NE(mem2, FencedAllocator::kInvalidOffset);
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);
+
+ // Check that no-op Poll doesn't affect the state.
+ EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);
+
+ // Check that freeing in Poll works.
+ base::Closure free_mem1_closure =
+ base::Bind(&FencedAllocator::Free,
+ base::Unretained(allocator.get()),
+ mem1);
+ EXPECT_CALL(*this, MockedPoll())
+ .WillOnce(InvokeWithoutArgs(&free_mem1_closure, &base::Closure::Run))
+ .RetiresOnSaturation();
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 1);
+
+ // Check that freeing still works.
+ EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
+ allocator->Free(mem2);
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), 0u);
+
+ allocator.reset();
+}
+
+// Test fixture for FencedAllocatorWrapper test - Creates a
+// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
+// AsyncAPIInterface for its interface (calling it directly, not through the
+// RPC mechanism), making sure Noops are ignored and SetToken calls are
+// properly forwarded to the engine.
+class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
+ protected:
+ virtual void SetUp() {
+ BaseFencedAllocatorTest::SetUp();
+
+ // Though allocating this buffer isn't strictly necessary, it makes
+ // allocations point to valid addresses, so they could be used for
+ // something.
+ buffer_.reset(static_cast<char*>(base::AlignedAlloc(
+ kBufferSize, kAllocAlignment)));
+ allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
+ helper_.get(),
+ base::Bind(&EmptyPoll),
+ buffer_.get()));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+
+ scoped_ptr<FencedAllocatorWrapper> allocator_;
+ scoped_ptr<char, base::AlignedFreeDeleter> buffer_;
+};
+
+// Checks basic alloc and free.
+TEST_F(FencedAllocatorWrapperTest, TestBasic) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ void *pointer = allocator_->Alloc(kSize);
+ ASSERT_TRUE(pointer);
+ EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
+ EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ char *pointer_char = allocator_->AllocTyped<char>(kSize);
+ ASSERT_TRUE(pointer_char);
+ EXPECT_LE(buffer_.get(), pointer_char);
+ EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
+ allocator_->Free(pointer_char);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
+ ASSERT_TRUE(pointer_uint);
+ EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
+ EXPECT_GE(buffer_.get() + kBufferSize,
+ reinterpret_cast<char *>(pointer_uint + kSize));
+
+ // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
+ // directly, except from the remaining size.
+ EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
+ allocator_->GetLargestFreeSize());
+ allocator_->Free(pointer_uint);
+}
+
+// Test alloc 0 fails.
+TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
+ allocator_->CheckConsistency();
+
+ void *pointer = allocator_->Alloc(0);
+ ASSERT_FALSE(pointer);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Checks that allocation offsets are aligned to multiples of 16 bytes.
+TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize1 = 75;
+ void *pointer1 = allocator_->Alloc(kSize1);
+ ASSERT_TRUE(pointer1);
+ EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ const unsigned int kSize2 = 43;
+ void *pointer2 = allocator_->Alloc(kSize2);
+ ASSERT_TRUE(pointer2);
+ EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer2);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer1);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
+// Checks out-of-memory condition.
+TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ pointers[i] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ void *pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, then reallocate with half the size.
+ allocator_->Free(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ pointers[0] = allocator_->Alloc(kSize/2);
+ EXPECT_TRUE(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // This allocation should fail as well.
+ pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ pointers[i] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ void *pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(pointers[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Because of the way the helper and engine are hooked up, commands aren't
+ // processed until the helper has to wait for something, so the token should
+ // not have passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed.
+ pointers[0] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+} // namespace gpu
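The test above exercises the pending-token free path from inside the fixture. As a quick orientation aid (not part of the diff), the sketch below shows how client code typically pairs CommandBufferHelper::InsertToken() with FencedAllocatorWrapper::FreePendingToken(); the function name and the 16-byte size are illustrative only.

// Sketch only, assuming an already-initialized helper and allocator.
void RecycleBlock(gpu::CommandBufferHelper* helper,
                  gpu::FencedAllocatorWrapper* allocator,
                  void* block) {
  // Insert a token after the commands that still reference |block|.
  int32 token = helper->InsertToken();
  // The block becomes reusable only once the service has passed the token.
  allocator->FreePendingToken(block, token);
  // A later Alloc() may have to wait for the token before reclaiming the
  // space, exactly as TestFreePendingToken verifies.
  void* fresh = allocator->Alloc(16);
  allocator->Free(fresh);
}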
diff --git a/gpu/command_buffer/client/gl_in_process_context.cc b/gpu/command_buffer/client/gl_in_process_context.cc
new file mode 100644
index 0000000..1441d03
--- /dev/null
+++ b/gpu/command_buffer/client/gl_in_process_context.cc
@@ -0,0 +1,298 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gl_in_process_context.h"
+
+#include <set>
+#include <utility>
+#include <vector>
+
+#include <GLES2/gl2.h>
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES 1
+#endif
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "ui/gfx/size.h"
+#include "ui/gl/gl_image.h"
+
+#if defined(OS_ANDROID)
+#include "ui/gl/android/surface_texture.h"
+#endif
+
+namespace gpu {
+
+namespace {
+
+const int32 kDefaultCommandBufferSize = 1024 * 1024;
+const unsigned int kDefaultStartTransferBufferSize = 4 * 1024 * 1024;
+const unsigned int kDefaultMinTransferBufferSize = 1 * 256 * 1024;
+const unsigned int kDefaultMaxTransferBufferSize = 16 * 1024 * 1024;
+
+class GLInProcessContextImpl
+ : public GLInProcessContext,
+ public base::SupportsWeakPtr<GLInProcessContextImpl> {
+ public:
+ explicit GLInProcessContextImpl(
+ const GLInProcessContextSharedMemoryLimits& mem_limits);
+ virtual ~GLInProcessContextImpl();
+
+ bool Initialize(
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ bool use_global_share_group,
+ GLInProcessContext* share_context,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const gpu::gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const scoped_refptr<InProcessCommandBuffer::Service>& service);
+
+ // GLInProcessContext implementation:
+ virtual void SetContextLostCallback(const base::Closure& callback) OVERRIDE;
+ virtual gles2::GLES2Implementation* GetImplementation() OVERRIDE;
+ virtual size_t GetMappedMemoryLimit() OVERRIDE;
+
+#if defined(OS_ANDROID)
+ virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
+ uint32 stream_id) OVERRIDE;
+#endif
+
+ private:
+ void Destroy();
+ void OnContextLost();
+ void OnSignalSyncPoint(const base::Closure& callback);
+
+ scoped_ptr<gles2::GLES2CmdHelper> gles2_helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ scoped_ptr<gles2::GLES2Implementation> gles2_implementation_;
+ scoped_ptr<InProcessCommandBuffer> command_buffer_;
+
+ const GLInProcessContextSharedMemoryLimits mem_limits_;
+ bool context_lost_;
+ base::Closure context_lost_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLInProcessContextImpl);
+};
+
+base::LazyInstance<base::Lock> g_all_shared_contexts_lock =
+ LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<std::set<GLInProcessContextImpl*> > g_all_shared_contexts =
+ LAZY_INSTANCE_INITIALIZER;
+
+GLInProcessContextImpl::GLInProcessContextImpl(
+ const GLInProcessContextSharedMemoryLimits& mem_limits)
+ : mem_limits_(mem_limits), context_lost_(false) {
+}
+
+GLInProcessContextImpl::~GLInProcessContextImpl() {
+ {
+ base::AutoLock lock(g_all_shared_contexts_lock.Get());
+ g_all_shared_contexts.Get().erase(this);
+ }
+ Destroy();
+}
+
+gles2::GLES2Implementation* GLInProcessContextImpl::GetImplementation() {
+ return gles2_implementation_.get();
+}
+
+size_t GLInProcessContextImpl::GetMappedMemoryLimit() {
+ return mem_limits_.mapped_memory_reclaim_limit;
+}
+
+void GLInProcessContextImpl::SetContextLostCallback(
+ const base::Closure& callback) {
+ context_lost_callback_ = callback;
+}
+
+void GLInProcessContextImpl::OnContextLost() {
+ context_lost_ = true;
+ if (!context_lost_callback_.is_null()) {
+ context_lost_callback_.Run();
+ }
+}
+
+bool GLInProcessContextImpl::Initialize(
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ bool use_global_share_group,
+ GLInProcessContext* share_context,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ const gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const scoped_refptr<InProcessCommandBuffer::Service>& service) {
+ DCHECK(!use_global_share_group || !share_context);
+ DCHECK(size.width() >= 0 && size.height() >= 0);
+
+ std::vector<int32> attrib_vector;
+ attribs.Serialize(&attrib_vector);
+
+ base::Closure wrapped_callback =
+ base::Bind(&GLInProcessContextImpl::OnContextLost, AsWeakPtr());
+ command_buffer_.reset(new InProcessCommandBuffer(service));
+
+ scoped_ptr<base::AutoLock> scoped_shared_context_lock;
+ scoped_refptr<gles2::ShareGroup> share_group;
+ InProcessCommandBuffer* share_command_buffer = NULL;
+ if (use_global_share_group) {
+ scoped_shared_context_lock.reset(
+ new base::AutoLock(g_all_shared_contexts_lock.Get()));
+ for (std::set<GLInProcessContextImpl*>::const_iterator it =
+ g_all_shared_contexts.Get().begin();
+ it != g_all_shared_contexts.Get().end();
+ it++) {
+ const GLInProcessContextImpl* context = *it;
+ if (!context->context_lost_) {
+ share_group = context->gles2_implementation_->share_group();
+ share_command_buffer = context->command_buffer_.get();
+ DCHECK(share_group.get());
+ DCHECK(share_command_buffer);
+ break;
+ }
+ }
+ } else if (share_context) {
+ GLInProcessContextImpl* impl =
+ static_cast<GLInProcessContextImpl*>(share_context);
+ share_group = impl->gles2_implementation_->share_group();
+ share_command_buffer = impl->command_buffer_.get();
+ DCHECK(share_group.get());
+ DCHECK(share_command_buffer);
+ }
+
+ if (!command_buffer_->Initialize(surface,
+ is_offscreen,
+ window,
+ size,
+ attrib_vector,
+ gpu_preference,
+ wrapped_callback,
+ share_command_buffer)) {
+ LOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
+ return false;
+ }
+
+ // Create the GLES2 helper, which writes the command buffer protocol.
+ gles2_helper_.reset(new gles2::GLES2CmdHelper(command_buffer_.get()));
+ if (!gles2_helper_->Initialize(mem_limits_.command_buffer_size)) {
+ LOG(ERROR) << "Failed to initialize GLES2CmdHelper";
+ Destroy();
+ return false;
+ }
+
+ // Create a transfer buffer.
+ transfer_buffer_.reset(new TransferBuffer(gles2_helper_.get()));
+
+ // Check for consistency.
+ DCHECK(!attribs.bind_generates_resource);
+ bool bind_generates_resource = false;
+
+ // Create the object exposing the OpenGL API.
+ gles2_implementation_.reset(
+ new gles2::GLES2Implementation(gles2_helper_.get(),
+ share_group.get(),
+ transfer_buffer_.get(),
+ bind_generates_resource,
+ attribs.lose_context_when_out_of_memory,
+ command_buffer_.get()));
+
+ if (use_global_share_group) {
+ g_all_shared_contexts.Get().insert(this);
+ scoped_shared_context_lock.reset();
+ }
+
+ if (!gles2_implementation_->Initialize(
+ mem_limits_.start_transfer_buffer_size,
+ mem_limits_.min_transfer_buffer_size,
+ mem_limits_.max_transfer_buffer_size,
+ mem_limits_.mapped_memory_reclaim_limit)) {
+ return false;
+ }
+
+ return true;
+}
+
+void GLInProcessContextImpl::Destroy() {
+ if (gles2_implementation_) {
+ // First flush the context to ensure that any pending frees of resources
+ // are completed. Otherwise, if this context is part of a share group,
+ // those resources might leak. Also, any remaining side effects of commands
+ // issued on this context might not be visible to other contexts in the
+ // share group.
+ gles2_implementation_->Flush();
+
+ gles2_implementation_.reset();
+ }
+
+ transfer_buffer_.reset();
+ gles2_helper_.reset();
+ command_buffer_.reset();
+}
+
+#if defined(OS_ANDROID)
+scoped_refptr<gfx::SurfaceTexture>
+GLInProcessContextImpl::GetSurfaceTexture(uint32 stream_id) {
+ return command_buffer_->GetSurfaceTexture(stream_id);
+}
+#endif
+
+} // anonymous namespace
+
+GLInProcessContextSharedMemoryLimits::GLInProcessContextSharedMemoryLimits()
+ : command_buffer_size(kDefaultCommandBufferSize),
+ start_transfer_buffer_size(kDefaultStartTransferBufferSize),
+ min_transfer_buffer_size(kDefaultMinTransferBufferSize),
+ max_transfer_buffer_size(kDefaultMaxTransferBufferSize),
+ mapped_memory_reclaim_limit(gles2::GLES2Implementation::kNoLimit) {
+}
+
+// static
+GLInProcessContext* GLInProcessContext::Create(
+ scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ GLInProcessContext* share_context,
+ bool use_global_share_group,
+ const ::gpu::gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const GLInProcessContextSharedMemoryLimits& memory_limits) {
+ DCHECK(!use_global_share_group || !share_context);
+ if (surface.get()) {
+ DCHECK_EQ(surface->IsOffscreen(), is_offscreen);
+ DCHECK(surface->GetSize() == size);
+ DCHECK_EQ(gfx::kNullAcceleratedWidget, window);
+ }
+
+ scoped_ptr<GLInProcessContextImpl> context(
+ new GLInProcessContextImpl(memory_limits));
+ if (!context->Initialize(surface,
+ is_offscreen,
+ use_global_share_group,
+ share_context,
+ window,
+ size,
+ attribs,
+ gpu_preference,
+ service))
+ return NULL;
+
+ return context.release();
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/gl_in_process_context.h b/gpu/command_buffer/client/gl_in_process_context.h
new file mode 100644
index 0000000..33b1348
--- /dev/null
+++ b/gpu/command_buffer/client/gl_in_process_context.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GL_IN_PROCESS_CONTEXT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GL_IN_PROCESS_CONTEXT_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "gl_in_process_context_export.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/service/in_process_command_buffer.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_preference.h"
+
+namespace gfx {
+class Size;
+}
+
+#if defined(OS_ANDROID)
+namespace gfx {
+class SurfaceTexture;
+}
+#endif
+
+namespace gpu {
+
+namespace gles2 {
+class GLES2Implementation;
+}
+
+struct GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContextSharedMemoryLimits {
+ GLInProcessContextSharedMemoryLimits();
+
+ int32 command_buffer_size;
+ unsigned int start_transfer_buffer_size;
+ unsigned int min_transfer_buffer_size;
+ unsigned int max_transfer_buffer_size;
+ unsigned int mapped_memory_reclaim_limit;
+};
+
+class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
+ public:
+ virtual ~GLInProcessContext() {}
+
+ // Creates a GLInProcessContext. If |is_offscreen| is true, the context
+ // renders to an offscreen surface. |attribs| specifies the context creation
+ // attributes.
+ // If |surface| is not NULL, then it must match |is_offscreen| and |size|,
+ // |window| must be gfx::kNullAcceleratedWidget, and the command buffer
+ // service must run on the same thread as this client because GLSurface is
+ // not thread safe. If |surface| is NULL, then the other parameters are used
+ // to correctly create a surface.
+ // Only one of |share_context| and |use_global_share_group| can be used at
+ // a time.
+ static GLInProcessContext* Create(
+ scoped_refptr<gpu::InProcessCommandBuffer::Service> service,
+ scoped_refptr<gfx::GLSurface> surface,
+ bool is_offscreen,
+ gfx::AcceleratedWidget window,
+ const gfx::Size& size,
+ GLInProcessContext* share_context,
+ bool use_global_share_group,
+ const gpu::gles2::ContextCreationAttribHelper& attribs,
+ gfx::GpuPreference gpu_preference,
+ const GLInProcessContextSharedMemoryLimits& memory_limits);
+
+ virtual void SetContextLostCallback(const base::Closure& callback) = 0;
+
+ // Allows direct access to the GLES2 implementation so a GLInProcessContext
+ // can be used without making it current.
+ virtual gles2::GLES2Implementation* GetImplementation() = 0;
+
+ virtual size_t GetMappedMemoryLimit() = 0;
+
+#if defined(OS_ANDROID)
+ virtual scoped_refptr<gfx::SurfaceTexture> GetSurfaceTexture(
+ uint32 stream_id) = 0;
+#endif
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GL_IN_PROCESS_CONTEXT_H_
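For orientation (not part of the diff), here is a minimal sketch of creating an offscreen context through the interface declared above. The NULL service (assumed to mean "use the default in-process service"), the 1x1 size, the GPU preference, and the attribute values are illustrative assumptions, not requirements of the API.

// Sketch only; error handling is reduced to the NULL check.
#include <GLES2/gl2.h>

#include "gpu/command_buffer/client/gl_in_process_context.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "ui/gfx/size.h"

gpu::GLInProcessContext* CreateOffscreenContext() {
  gpu::gles2::ContextCreationAttribHelper attribs;
  attribs.bind_generates_resource = false;  // Initialize() DCHECKs this.
  attribs.lose_context_when_out_of_memory = true;

  gpu::GLInProcessContextSharedMemoryLimits limits;  // Library defaults.

  gpu::GLInProcessContext* context = gpu::GLInProcessContext::Create(
      NULL,                         // service: assumed default in-process one
      NULL,                         // surface: let Create() build one
      true,                         // is_offscreen
      gfx::kNullAcceleratedWidget,  // window (ignored when offscreen)
      gfx::Size(1, 1),              // size
      NULL,                         // share_context
      false,                        // use_global_share_group
      attribs,
      gfx::PreferDiscreteGpu,
      limits);
  if (!context)
    return NULL;

  // GL commands are issued through the GLES2Implementation.
  gpu::gles2::GLES2Implementation* gl = context->GetImplementation();
  gl->ClearColor(0.f, 0.f, 0.f, 1.f);
  gl->Clear(GL_COLOR_BUFFER_BIT);
  return context;
}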
diff --git a/gpu/command_buffer/client/gl_in_process_context_export.h b/gpu/command_buffer/client/gl_in_process_context_export.h
new file mode 100644
index 0000000..36c4a34
--- /dev/null
+++ b/gpu/command_buffer/client/gl_in_process_context_export.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GL_IN_PROCESS_CONTEXT_EXPORT_H_
+#define GL_IN_PROCESS_CONTEXT_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GL_IN_PROCESS_CONTEXT_IMPLEMENTATION)
+#define GL_IN_PROCESS_CONTEXT_EXPORT __declspec(dllexport)
+#else
+#define GL_IN_PROCESS_CONTEXT_EXPORT __declspec(dllimport)
+#endif // defined(GL_IN_PROCESS_CONTEXT_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GL_IN_PROCESS_CONTEXT_IMPLEMENTATION)
+#define GL_IN_PROCESS_CONTEXT_EXPORT __attribute__((visibility("default")))
+#else
+#define GL_IN_PROCESS_CONTEXT_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GL_IN_PROCESS_CONTEXT_EXPORT
+#endif
+
+#endif // GL_IN_PROCESS_CONTEXT_EXPORT_H_
diff --git a/gpu/command_buffer/client/gles2_c_lib.cc b/gpu/command_buffer/client/gles2_c_lib.cc
new file mode 100644
index 0000000..fbe7b55
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_c_lib.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These functions emulate GLES2 over command buffers for C.
+
+#include <assert.h>
+#include <stdlib.h>
+#include "gpu/command_buffer/client/gles2_lib.h"
+
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES
+#endif
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+extern "C" {
+// Include the auto-generated part of this file. We split it out so that the
+// non-auto-generated parts can be edited right here in this file instead of
+// having to edit a template or the code generator.
+#include "gpu/command_buffer/client/gles2_c_lib_autogen.h"
+} // extern "C"
+
+
diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h
new file mode 100644
index 0000000..e11cf63
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -0,0 +1,1849 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// These functions emulate GLES2 over command buffers.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
+
+void GLES2ActiveTexture(GLenum texture) {
+ gles2::GetGLContext()->ActiveTexture(texture);
+}
+void GLES2AttachShader(GLuint program, GLuint shader) {
+ gles2::GetGLContext()->AttachShader(program, shader);
+}
+void GLES2BindAttribLocation(GLuint program, GLuint index, const char* name) {
+ gles2::GetGLContext()->BindAttribLocation(program, index, name);
+}
+void GLES2BindBuffer(GLenum target, GLuint buffer) {
+ gles2::GetGLContext()->BindBuffer(target, buffer);
+}
+void GLES2BindFramebuffer(GLenum target, GLuint framebuffer) {
+ gles2::GetGLContext()->BindFramebuffer(target, framebuffer);
+}
+void GLES2BindRenderbuffer(GLenum target, GLuint renderbuffer) {
+ gles2::GetGLContext()->BindRenderbuffer(target, renderbuffer);
+}
+void GLES2BindTexture(GLenum target, GLuint texture) {
+ gles2::GetGLContext()->BindTexture(target, texture);
+}
+void GLES2BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ gles2::GetGLContext()->BlendColor(red, green, blue, alpha);
+}
+void GLES2BlendEquation(GLenum mode) {
+ gles2::GetGLContext()->BlendEquation(mode);
+}
+void GLES2BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) {
+ gles2::GetGLContext()->BlendEquationSeparate(modeRGB, modeAlpha);
+}
+void GLES2BlendFunc(GLenum sfactor, GLenum dfactor) {
+ gles2::GetGLContext()->BlendFunc(sfactor, dfactor);
+}
+void GLES2BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ gles2::GetGLContext()->BlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+}
+void GLES2BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) {
+ gles2::GetGLContext()->BufferData(target, size, data, usage);
+}
+void GLES2BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) {
+ gles2::GetGLContext()->BufferSubData(target, offset, size, data);
+}
+GLenum GLES2CheckFramebufferStatus(GLenum target) {
+ return gles2::GetGLContext()->CheckFramebufferStatus(target);
+}
+void GLES2Clear(GLbitfield mask) {
+ gles2::GetGLContext()->Clear(mask);
+}
+void GLES2ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ gles2::GetGLContext()->ClearColor(red, green, blue, alpha);
+}
+void GLES2ClearDepthf(GLclampf depth) {
+ gles2::GetGLContext()->ClearDepthf(depth);
+}
+void GLES2ClearStencil(GLint s) {
+ gles2::GetGLContext()->ClearStencil(s);
+}
+void GLES2ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ gles2::GetGLContext()->ColorMask(red, green, blue, alpha);
+}
+void GLES2CompileShader(GLuint shader) {
+ gles2::GetGLContext()->CompileShader(shader);
+}
+void GLES2CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) {
+ gles2::GetGLContext()->CompressedTexImage2D(
+ target, level, internalformat, width, height, border, imageSize, data);
+}
+void GLES2CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) {
+ gles2::GetGLContext()->CompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+}
+void GLES2CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ gles2::GetGLContext()->CopyTexImage2D(
+ target, level, internalformat, x, y, width, height, border);
+}
+void GLES2CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->CopyTexSubImage2D(
+ target, level, xoffset, yoffset, x, y, width, height);
+}
+GLuint GLES2CreateProgram() {
+ return gles2::GetGLContext()->CreateProgram();
+}
+GLuint GLES2CreateShader(GLenum type) {
+ return gles2::GetGLContext()->CreateShader(type);
+}
+void GLES2CullFace(GLenum mode) {
+ gles2::GetGLContext()->CullFace(mode);
+}
+void GLES2DeleteBuffers(GLsizei n, const GLuint* buffers) {
+ gles2::GetGLContext()->DeleteBuffers(n, buffers);
+}
+void GLES2DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) {
+ gles2::GetGLContext()->DeleteFramebuffers(n, framebuffers);
+}
+void GLES2DeleteProgram(GLuint program) {
+ gles2::GetGLContext()->DeleteProgram(program);
+}
+void GLES2DeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers) {
+ gles2::GetGLContext()->DeleteRenderbuffers(n, renderbuffers);
+}
+void GLES2DeleteShader(GLuint shader) {
+ gles2::GetGLContext()->DeleteShader(shader);
+}
+void GLES2DeleteTextures(GLsizei n, const GLuint* textures) {
+ gles2::GetGLContext()->DeleteTextures(n, textures);
+}
+void GLES2DepthFunc(GLenum func) {
+ gles2::GetGLContext()->DepthFunc(func);
+}
+void GLES2DepthMask(GLboolean flag) {
+ gles2::GetGLContext()->DepthMask(flag);
+}
+void GLES2DepthRangef(GLclampf zNear, GLclampf zFar) {
+ gles2::GetGLContext()->DepthRangef(zNear, zFar);
+}
+void GLES2DetachShader(GLuint program, GLuint shader) {
+ gles2::GetGLContext()->DetachShader(program, shader);
+}
+void GLES2Disable(GLenum cap) {
+ gles2::GetGLContext()->Disable(cap);
+}
+void GLES2DisableVertexAttribArray(GLuint index) {
+ gles2::GetGLContext()->DisableVertexAttribArray(index);
+}
+void GLES2DrawArrays(GLenum mode, GLint first, GLsizei count) {
+ gles2::GetGLContext()->DrawArrays(mode, first, count);
+}
+void GLES2DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) {
+ gles2::GetGLContext()->DrawElements(mode, count, type, indices);
+}
+void GLES2Enable(GLenum cap) {
+ gles2::GetGLContext()->Enable(cap);
+}
+void GLES2EnableVertexAttribArray(GLuint index) {
+ gles2::GetGLContext()->EnableVertexAttribArray(index);
+}
+void GLES2Finish() {
+ gles2::GetGLContext()->Finish();
+}
+void GLES2Flush() {
+ gles2::GetGLContext()->Flush();
+}
+void GLES2FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ gles2::GetGLContext()->FramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+}
+void GLES2FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) {
+ gles2::GetGLContext()->FramebufferTexture2D(
+ target, attachment, textarget, texture, level);
+}
+void GLES2FrontFace(GLenum mode) {
+ gles2::GetGLContext()->FrontFace(mode);
+}
+void GLES2GenBuffers(GLsizei n, GLuint* buffers) {
+ gles2::GetGLContext()->GenBuffers(n, buffers);
+}
+void GLES2GenerateMipmap(GLenum target) {
+ gles2::GetGLContext()->GenerateMipmap(target);
+}
+void GLES2GenFramebuffers(GLsizei n, GLuint* framebuffers) {
+ gles2::GetGLContext()->GenFramebuffers(n, framebuffers);
+}
+void GLES2GenRenderbuffers(GLsizei n, GLuint* renderbuffers) {
+ gles2::GetGLContext()->GenRenderbuffers(n, renderbuffers);
+}
+void GLES2GenTextures(GLsizei n, GLuint* textures) {
+ gles2::GetGLContext()->GenTextures(n, textures);
+}
+void GLES2GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ gles2::GetGLContext()->GetActiveAttrib(
+ program, index, bufsize, length, size, type, name);
+}
+void GLES2GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ gles2::GetGLContext()->GetActiveUniform(
+ program, index, bufsize, length, size, type, name);
+}
+void GLES2GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) {
+ gles2::GetGLContext()->GetAttachedShaders(program, maxcount, count, shaders);
+}
+GLint GLES2GetAttribLocation(GLuint program, const char* name) {
+ return gles2::GetGLContext()->GetAttribLocation(program, name);
+}
+void GLES2GetBooleanv(GLenum pname, GLboolean* params) {
+ gles2::GetGLContext()->GetBooleanv(pname, params);
+}
+void GLES2GetBufferParameteriv(GLenum target, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetBufferParameteriv(target, pname, params);
+}
+GLenum GLES2GetError() {
+ return gles2::GetGLContext()->GetError();
+}
+void GLES2GetFloatv(GLenum pname, GLfloat* params) {
+ gles2::GetGLContext()->GetFloatv(pname, params);
+}
+void GLES2GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetFramebufferAttachmentParameteriv(
+ target, attachment, pname, params);
+}
+void GLES2GetIntegerv(GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetIntegerv(pname, params);
+}
+void GLES2GetProgramiv(GLuint program, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetProgramiv(program, pname, params);
+}
+void GLES2GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ gles2::GetGLContext()->GetProgramInfoLog(program, bufsize, length, infolog);
+}
+void GLES2GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetRenderbufferParameteriv(target, pname, params);
+}
+void GLES2GetShaderiv(GLuint shader, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetShaderiv(shader, pname, params);
+}
+void GLES2GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ gles2::GetGLContext()->GetShaderInfoLog(shader, bufsize, length, infolog);
+}
+void GLES2GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) {
+ gles2::GetGLContext()->GetShaderPrecisionFormat(
+ shadertype, precisiontype, range, precision);
+}
+void GLES2GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ gles2::GetGLContext()->GetShaderSource(shader, bufsize, length, source);
+}
+const GLubyte* GLES2GetString(GLenum name) {
+ return gles2::GetGLContext()->GetString(name);
+}
+void GLES2GetTexParameterfv(GLenum target, GLenum pname, GLfloat* params) {
+ gles2::GetGLContext()->GetTexParameterfv(target, pname, params);
+}
+void GLES2GetTexParameteriv(GLenum target, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetTexParameteriv(target, pname, params);
+}
+void GLES2GetUniformfv(GLuint program, GLint location, GLfloat* params) {
+ gles2::GetGLContext()->GetUniformfv(program, location, params);
+}
+void GLES2GetUniformiv(GLuint program, GLint location, GLint* params) {
+ gles2::GetGLContext()->GetUniformiv(program, location, params);
+}
+GLint GLES2GetUniformLocation(GLuint program, const char* name) {
+ return gles2::GetGLContext()->GetUniformLocation(program, name);
+}
+void GLES2GetVertexAttribfv(GLuint index, GLenum pname, GLfloat* params) {
+ gles2::GetGLContext()->GetVertexAttribfv(index, pname, params);
+}
+void GLES2GetVertexAttribiv(GLuint index, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetVertexAttribiv(index, pname, params);
+}
+void GLES2GetVertexAttribPointerv(GLuint index, GLenum pname, void** pointer) {
+ gles2::GetGLContext()->GetVertexAttribPointerv(index, pname, pointer);
+}
+void GLES2Hint(GLenum target, GLenum mode) {
+ gles2::GetGLContext()->Hint(target, mode);
+}
+GLboolean GLES2IsBuffer(GLuint buffer) {
+ return gles2::GetGLContext()->IsBuffer(buffer);
+}
+GLboolean GLES2IsEnabled(GLenum cap) {
+ return gles2::GetGLContext()->IsEnabled(cap);
+}
+GLboolean GLES2IsFramebuffer(GLuint framebuffer) {
+ return gles2::GetGLContext()->IsFramebuffer(framebuffer);
+}
+GLboolean GLES2IsProgram(GLuint program) {
+ return gles2::GetGLContext()->IsProgram(program);
+}
+GLboolean GLES2IsRenderbuffer(GLuint renderbuffer) {
+ return gles2::GetGLContext()->IsRenderbuffer(renderbuffer);
+}
+GLboolean GLES2IsShader(GLuint shader) {
+ return gles2::GetGLContext()->IsShader(shader);
+}
+GLboolean GLES2IsTexture(GLuint texture) {
+ return gles2::GetGLContext()->IsTexture(texture);
+}
+void GLES2LineWidth(GLfloat width) {
+ gles2::GetGLContext()->LineWidth(width);
+}
+void GLES2LinkProgram(GLuint program) {
+ gles2::GetGLContext()->LinkProgram(program);
+}
+void GLES2PixelStorei(GLenum pname, GLint param) {
+ gles2::GetGLContext()->PixelStorei(pname, param);
+}
+void GLES2PolygonOffset(GLfloat factor, GLfloat units) {
+ gles2::GetGLContext()->PolygonOffset(factor, units);
+}
+void GLES2ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) {
+ gles2::GetGLContext()->ReadPixels(x, y, width, height, format, type, pixels);
+}
+void GLES2ReleaseShaderCompiler() {
+ gles2::GetGLContext()->ReleaseShaderCompiler();
+}
+void GLES2RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorage(
+ target, internalformat, width, height);
+}
+void GLES2SampleCoverage(GLclampf value, GLboolean invert) {
+ gles2::GetGLContext()->SampleCoverage(value, invert);
+}
+void GLES2Scissor(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::GetGLContext()->Scissor(x, y, width, height);
+}
+void GLES2ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) {
+ gles2::GetGLContext()->ShaderBinary(n, shaders, binaryformat, binary, length);
+}
+void GLES2ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) {
+ gles2::GetGLContext()->ShaderSource(shader, count, str, length);
+}
+void GLES2ShallowFinishCHROMIUM() {
+ gles2::GetGLContext()->ShallowFinishCHROMIUM();
+}
+void GLES2ShallowFlushCHROMIUM() {
+ gles2::GetGLContext()->ShallowFlushCHROMIUM();
+}
+void GLES2StencilFunc(GLenum func, GLint ref, GLuint mask) {
+ gles2::GetGLContext()->StencilFunc(func, ref, mask);
+}
+void GLES2StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) {
+ gles2::GetGLContext()->StencilFuncSeparate(face, func, ref, mask);
+}
+void GLES2StencilMask(GLuint mask) {
+ gles2::GetGLContext()->StencilMask(mask);
+}
+void GLES2StencilMaskSeparate(GLenum face, GLuint mask) {
+ gles2::GetGLContext()->StencilMaskSeparate(face, mask);
+}
+void GLES2StencilOp(GLenum fail, GLenum zfail, GLenum zpass) {
+ gles2::GetGLContext()->StencilOp(fail, zfail, zpass);
+}
+void GLES2StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ gles2::GetGLContext()->StencilOpSeparate(face, fail, zfail, zpass);
+}
+void GLES2TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ gles2::GetGLContext()->TexImage2D(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+void GLES2TexParameterf(GLenum target, GLenum pname, GLfloat param) {
+ gles2::GetGLContext()->TexParameterf(target, pname, param);
+}
+void GLES2TexParameterfv(GLenum target, GLenum pname, const GLfloat* params) {
+ gles2::GetGLContext()->TexParameterfv(target, pname, params);
+}
+void GLES2TexParameteri(GLenum target, GLenum pname, GLint param) {
+ gles2::GetGLContext()->TexParameteri(target, pname, param);
+}
+void GLES2TexParameteriv(GLenum target, GLenum pname, const GLint* params) {
+ gles2::GetGLContext()->TexParameteriv(target, pname, params);
+}
+void GLES2TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ gles2::GetGLContext()->TexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+void GLES2Uniform1f(GLint location, GLfloat x) {
+ gles2::GetGLContext()->Uniform1f(location, x);
+}
+void GLES2Uniform1fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform1fv(location, count, v);
+}
+void GLES2Uniform1i(GLint location, GLint x) {
+ gles2::GetGLContext()->Uniform1i(location, x);
+}
+void GLES2Uniform1iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform1iv(location, count, v);
+}
+void GLES2Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ gles2::GetGLContext()->Uniform2f(location, x, y);
+}
+void GLES2Uniform2fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform2fv(location, count, v);
+}
+void GLES2Uniform2i(GLint location, GLint x, GLint y) {
+ gles2::GetGLContext()->Uniform2i(location, x, y);
+}
+void GLES2Uniform2iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform2iv(location, count, v);
+}
+void GLES2Uniform3f(GLint location, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::GetGLContext()->Uniform3f(location, x, y, z);
+}
+void GLES2Uniform3fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform3fv(location, count, v);
+}
+void GLES2Uniform3i(GLint location, GLint x, GLint y, GLint z) {
+ gles2::GetGLContext()->Uniform3i(location, x, y, z);
+}
+void GLES2Uniform3iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform3iv(location, count, v);
+}
+void GLES2Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ gles2::GetGLContext()->Uniform4f(location, x, y, z, w);
+}
+void GLES2Uniform4fv(GLint location, GLsizei count, const GLfloat* v) {
+ gles2::GetGLContext()->Uniform4fv(location, count, v);
+}
+void GLES2Uniform4i(GLint location, GLint x, GLint y, GLint z, GLint w) {
+ gles2::GetGLContext()->Uniform4i(location, x, y, z, w);
+}
+void GLES2Uniform4iv(GLint location, GLsizei count, const GLint* v) {
+ gles2::GetGLContext()->Uniform4iv(location, count, v);
+}
+void GLES2UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ gles2::GetGLContext()->UniformMatrix2fv(location, count, transpose, value);
+}
+void GLES2UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ gles2::GetGLContext()->UniformMatrix3fv(location, count, transpose, value);
+}
+void GLES2UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ gles2::GetGLContext()->UniformMatrix4fv(location, count, transpose, value);
+}
+void GLES2UseProgram(GLuint program) {
+ gles2::GetGLContext()->UseProgram(program);
+}
+void GLES2ValidateProgram(GLuint program) {
+ gles2::GetGLContext()->ValidateProgram(program);
+}
+void GLES2VertexAttrib1f(GLuint indx, GLfloat x) {
+ gles2::GetGLContext()->VertexAttrib1f(indx, x);
+}
+void GLES2VertexAttrib1fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib1fv(indx, values);
+}
+void GLES2VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) {
+ gles2::GetGLContext()->VertexAttrib2f(indx, x, y);
+}
+void GLES2VertexAttrib2fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib2fv(indx, values);
+}
+void GLES2VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::GetGLContext()->VertexAttrib3f(indx, x, y, z);
+}
+void GLES2VertexAttrib3fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib3fv(indx, values);
+}
+void GLES2VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ gles2::GetGLContext()->VertexAttrib4f(indx, x, y, z, w);
+}
+void GLES2VertexAttrib4fv(GLuint indx, const GLfloat* values) {
+ gles2::GetGLContext()->VertexAttrib4fv(indx, values);
+}
+void GLES2VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ gles2::GetGLContext()->VertexAttribPointer(
+ indx, size, type, normalized, stride, ptr);
+}
+void GLES2Viewport(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::GetGLContext()->Viewport(x, y, width, height);
+}
+void GLES2BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ gles2::GetGLContext()->BlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+}
+void GLES2RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+}
+void GLES2RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->RenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+}
+void GLES2FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) {
+ gles2::GetGLContext()->FramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget, texture, level, samples);
+}
+void GLES2TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::GetGLContext()->TexStorage2DEXT(
+ target, levels, internalFormat, width, height);
+}
+void GLES2GenQueriesEXT(GLsizei n, GLuint* queries) {
+ gles2::GetGLContext()->GenQueriesEXT(n, queries);
+}
+void GLES2DeleteQueriesEXT(GLsizei n, const GLuint* queries) {
+ gles2::GetGLContext()->DeleteQueriesEXT(n, queries);
+}
+GLboolean GLES2IsQueryEXT(GLuint id) {
+ return gles2::GetGLContext()->IsQueryEXT(id);
+}
+void GLES2BeginQueryEXT(GLenum target, GLuint id) {
+ gles2::GetGLContext()->BeginQueryEXT(target, id);
+}
+void GLES2EndQueryEXT(GLenum target) {
+ gles2::GetGLContext()->EndQueryEXT(target);
+}
+void GLES2GetQueryivEXT(GLenum target, GLenum pname, GLint* params) {
+ gles2::GetGLContext()->GetQueryivEXT(target, pname, params);
+}
+void GLES2GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) {
+ gles2::GetGLContext()->GetQueryObjectuivEXT(id, pname, params);
+}
+void GLES2InsertEventMarkerEXT(GLsizei length, const GLchar* marker) {
+ gles2::GetGLContext()->InsertEventMarkerEXT(length, marker);
+}
+void GLES2PushGroupMarkerEXT(GLsizei length, const GLchar* marker) {
+ gles2::GetGLContext()->PushGroupMarkerEXT(length, marker);
+}
+void GLES2PopGroupMarkerEXT() {
+ gles2::GetGLContext()->PopGroupMarkerEXT();
+}
+void GLES2GenVertexArraysOES(GLsizei n, GLuint* arrays) {
+ gles2::GetGLContext()->GenVertexArraysOES(n, arrays);
+}
+void GLES2DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) {
+ gles2::GetGLContext()->DeleteVertexArraysOES(n, arrays);
+}
+GLboolean GLES2IsVertexArrayOES(GLuint array) {
+ return gles2::GetGLContext()->IsVertexArrayOES(array);
+}
+void GLES2BindVertexArrayOES(GLuint array) {
+ gles2::GetGLContext()->BindVertexArrayOES(array);
+}
+void GLES2SwapBuffers() {
+ gles2::GetGLContext()->SwapBuffers();
+}
+GLuint GLES2GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) {
+ return gles2::GetGLContext()->GetMaxValueInBufferCHROMIUM(
+ buffer_id, count, type, offset);
+}
+GLboolean GLES2EnableFeatureCHROMIUM(const char* feature) {
+ return gles2::GetGLContext()->EnableFeatureCHROMIUM(feature);
+}
+void* GLES2MapBufferCHROMIUM(GLuint target, GLenum access) {
+ return gles2::GetGLContext()->MapBufferCHROMIUM(target, access);
+}
+GLboolean GLES2UnmapBufferCHROMIUM(GLuint target) {
+ return gles2::GetGLContext()->UnmapBufferCHROMIUM(target);
+}
+void* GLES2MapImageCHROMIUM(GLuint image_id) {
+ return gles2::GetGLContext()->MapImageCHROMIUM(image_id);
+}
+void GLES2UnmapImageCHROMIUM(GLuint image_id) {
+ gles2::GetGLContext()->UnmapImageCHROMIUM(image_id);
+}
+void* GLES2MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) {
+ return gles2::GetGLContext()->MapBufferSubDataCHROMIUM(
+ target, offset, size, access);
+}
+void GLES2UnmapBufferSubDataCHROMIUM(const void* mem) {
+ gles2::GetGLContext()->UnmapBufferSubDataCHROMIUM(mem);
+}
+void* GLES2MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) {
+ return gles2::GetGLContext()->MapTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, access);
+}
+void GLES2UnmapTexSubImage2DCHROMIUM(const void* mem) {
+ gles2::GetGLContext()->UnmapTexSubImage2DCHROMIUM(mem);
+}
+void GLES2ResizeCHROMIUM(GLuint width, GLuint height, GLfloat scale_factor) {
+ gles2::GetGLContext()->ResizeCHROMIUM(width, height, scale_factor);
+}
+const GLchar* GLES2GetRequestableExtensionsCHROMIUM() {
+ return gles2::GetGLContext()->GetRequestableExtensionsCHROMIUM();
+}
+void GLES2RequestExtensionCHROMIUM(const char* extension) {
+ gles2::GetGLContext()->RequestExtensionCHROMIUM(extension);
+}
+void GLES2RateLimitOffscreenContextCHROMIUM() {
+ gles2::GetGLContext()->RateLimitOffscreenContextCHROMIUM();
+}
+void GLES2GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) {
+ gles2::GetGLContext()->GetMultipleIntegervCHROMIUM(
+ pnames, count, results, size);
+}
+void GLES2GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) {
+ gles2::GetGLContext()->GetProgramInfoCHROMIUM(program, bufsize, size, info);
+}
+GLuint GLES2CreateStreamTextureCHROMIUM(GLuint texture) {
+ return gles2::GetGLContext()->CreateStreamTextureCHROMIUM(texture);
+}
+GLuint GLES2CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ return gles2::GetGLContext()->CreateImageCHROMIUM(
+ width, height, internalformat, usage);
+}
+void GLES2DestroyImageCHROMIUM(GLuint image_id) {
+ gles2::GetGLContext()->DestroyImageCHROMIUM(image_id);
+}
+void GLES2GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) {
+ gles2::GetGLContext()->GetImageParameterivCHROMIUM(image_id, pname, params);
+}
+GLuint GLES2CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ return gles2::GetGLContext()->CreateGpuMemoryBufferImageCHROMIUM(
+ width, height, internalformat, usage);
+}
+void GLES2GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ gles2::GetGLContext()->GetTranslatedShaderSourceANGLE(
+ shader, bufsize, length, source);
+}
+void GLES2PostSubBufferCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
+ gles2::GetGLContext()->PostSubBufferCHROMIUM(x, y, width, height);
+}
+void GLES2TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ gles2::GetGLContext()->TexImageIOSurface2DCHROMIUM(
+ target, width, height, ioSurfaceId, plane);
+}
+void GLES2CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ gles2::GetGLContext()->CopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+}
+void GLES2DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ gles2::GetGLContext()->DrawArraysInstancedANGLE(
+ mode, first, count, primcount);
+}
+void GLES2DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) {
+ gles2::GetGLContext()->DrawElementsInstancedANGLE(
+ mode, count, type, indices, primcount);
+}
+void GLES2VertexAttribDivisorANGLE(GLuint index, GLuint divisor) {
+ gles2::GetGLContext()->VertexAttribDivisorANGLE(index, divisor);
+}
+void GLES2GenMailboxCHROMIUM(GLbyte* mailbox) {
+ gles2::GetGLContext()->GenMailboxCHROMIUM(mailbox);
+}
+void GLES2ProduceTextureCHROMIUM(GLenum target, const GLbyte* mailbox) {
+ gles2::GetGLContext()->ProduceTextureCHROMIUM(target, mailbox);
+}
+void GLES2ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) {
+ gles2::GetGLContext()->ProduceTextureDirectCHROMIUM(texture, target, mailbox);
+}
+void GLES2ConsumeTextureCHROMIUM(GLenum target, const GLbyte* mailbox) {
+ gles2::GetGLContext()->ConsumeTextureCHROMIUM(target, mailbox);
+}
+GLuint GLES2CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) {
+ return gles2::GetGLContext()->CreateAndConsumeTextureCHROMIUM(target,
+ mailbox);
+}
+void GLES2BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) {
+ gles2::GetGLContext()->BindUniformLocationCHROMIUM(program, location, name);
+}
+void GLES2BindTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::GetGLContext()->BindTexImage2DCHROMIUM(target, imageId);
+}
+void GLES2ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::GetGLContext()->ReleaseTexImage2DCHROMIUM(target, imageId);
+}
+void GLES2TraceBeginCHROMIUM(const char* name) {
+ gles2::GetGLContext()->TraceBeginCHROMIUM(name);
+}
+void GLES2TraceEndCHROMIUM() {
+ gles2::GetGLContext()->TraceEndCHROMIUM();
+}
+void GLES2AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) {
+ gles2::GetGLContext()->AsyncTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+}
+void GLES2AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ gles2::GetGLContext()->AsyncTexImage2DCHROMIUM(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+void GLES2WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ gles2::GetGLContext()->WaitAsyncTexImage2DCHROMIUM(target);
+}
+void GLES2WaitAllAsyncTexImage2DCHROMIUM() {
+ gles2::GetGLContext()->WaitAllAsyncTexImage2DCHROMIUM();
+}
+void GLES2DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ gles2::GetGLContext()->DiscardFramebufferEXT(target, count, attachments);
+}
+void GLES2LoseContextCHROMIUM(GLenum current, GLenum other) {
+ gles2::GetGLContext()->LoseContextCHROMIUM(current, other);
+}
+GLuint GLES2InsertSyncPointCHROMIUM() {
+ return gles2::GetGLContext()->InsertSyncPointCHROMIUM();
+}
+void GLES2WaitSyncPointCHROMIUM(GLuint sync_point) {
+ gles2::GetGLContext()->WaitSyncPointCHROMIUM(sync_point);
+}
+void GLES2DrawBuffersEXT(GLsizei count, const GLenum* bufs) {
+ gles2::GetGLContext()->DrawBuffersEXT(count, bufs);
+}
+void GLES2DiscardBackbufferCHROMIUM() {
+ gles2::GetGLContext()->DiscardBackbufferCHROMIUM();
+}
+void GLES2ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ gles2::GetGLContext()->ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+}
+void GLES2MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) {
+ gles2::GetGLContext()->MatrixLoadfCHROMIUM(matrixMode, m);
+}
+void GLES2MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ gles2::GetGLContext()->MatrixLoadIdentityCHROMIUM(matrixMode);
+}
+
+namespace gles2 {
+
+extern const NameToFunc g_gles2_function_table[] = {
+ {
+ "glActiveTexture",
+ reinterpret_cast<GLES2FunctionPointer>(glActiveTexture),
+ },
+ {
+ "glAttachShader",
+ reinterpret_cast<GLES2FunctionPointer>(glAttachShader),
+ },
+ {
+ "glBindAttribLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glBindAttribLocation),
+ },
+ {
+ "glBindBuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glBindBuffer),
+ },
+ {
+ "glBindFramebuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glBindFramebuffer),
+ },
+ {
+ "glBindRenderbuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glBindRenderbuffer),
+ },
+ {
+ "glBindTexture",
+ reinterpret_cast<GLES2FunctionPointer>(glBindTexture),
+ },
+ {
+ "glBlendColor",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendColor),
+ },
+ {
+ "glBlendEquation",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendEquation),
+ },
+ {
+ "glBlendEquationSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendEquationSeparate),
+ },
+ {
+ "glBlendFunc",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendFunc),
+ },
+ {
+ "glBlendFuncSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendFuncSeparate),
+ },
+ {
+ "glBufferData",
+ reinterpret_cast<GLES2FunctionPointer>(glBufferData),
+ },
+ {
+ "glBufferSubData",
+ reinterpret_cast<GLES2FunctionPointer>(glBufferSubData),
+ },
+ {
+ "glCheckFramebufferStatus",
+ reinterpret_cast<GLES2FunctionPointer>(glCheckFramebufferStatus),
+ },
+ {
+ "glClear",
+ reinterpret_cast<GLES2FunctionPointer>(glClear),
+ },
+ {
+ "glClearColor",
+ reinterpret_cast<GLES2FunctionPointer>(glClearColor),
+ },
+ {
+ "glClearDepthf",
+ reinterpret_cast<GLES2FunctionPointer>(glClearDepthf),
+ },
+ {
+ "glClearStencil",
+ reinterpret_cast<GLES2FunctionPointer>(glClearStencil),
+ },
+ {
+ "glColorMask",
+ reinterpret_cast<GLES2FunctionPointer>(glColorMask),
+ },
+ {
+ "glCompileShader",
+ reinterpret_cast<GLES2FunctionPointer>(glCompileShader),
+ },
+ {
+ "glCompressedTexImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCompressedTexImage2D),
+ },
+ {
+ "glCompressedTexSubImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCompressedTexSubImage2D),
+ },
+ {
+ "glCopyTexImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCopyTexImage2D),
+ },
+ {
+ "glCopyTexSubImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glCopyTexSubImage2D),
+ },
+ {
+ "glCreateProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateProgram),
+ },
+ {
+ "glCreateShader",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateShader),
+ },
+ {
+ "glCullFace",
+ reinterpret_cast<GLES2FunctionPointer>(glCullFace),
+ },
+ {
+ "glDeleteBuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteBuffers),
+ },
+ {
+ "glDeleteFramebuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteFramebuffers),
+ },
+ {
+ "glDeleteProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteProgram),
+ },
+ {
+ "glDeleteRenderbuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteRenderbuffers),
+ },
+ {
+ "glDeleteShader",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteShader),
+ },
+ {
+ "glDeleteTextures",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteTextures),
+ },
+ {
+ "glDepthFunc",
+ reinterpret_cast<GLES2FunctionPointer>(glDepthFunc),
+ },
+ {
+ "glDepthMask",
+ reinterpret_cast<GLES2FunctionPointer>(glDepthMask),
+ },
+ {
+ "glDepthRangef",
+ reinterpret_cast<GLES2FunctionPointer>(glDepthRangef),
+ },
+ {
+ "glDetachShader",
+ reinterpret_cast<GLES2FunctionPointer>(glDetachShader),
+ },
+ {
+ "glDisable",
+ reinterpret_cast<GLES2FunctionPointer>(glDisable),
+ },
+ {
+ "glDisableVertexAttribArray",
+ reinterpret_cast<GLES2FunctionPointer>(glDisableVertexAttribArray),
+ },
+ {
+ "glDrawArrays",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawArrays),
+ },
+ {
+ "glDrawElements",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawElements),
+ },
+ {
+ "glEnable",
+ reinterpret_cast<GLES2FunctionPointer>(glEnable),
+ },
+ {
+ "glEnableVertexAttribArray",
+ reinterpret_cast<GLES2FunctionPointer>(glEnableVertexAttribArray),
+ },
+ {
+ "glFinish",
+ reinterpret_cast<GLES2FunctionPointer>(glFinish),
+ },
+ {
+ "glFlush",
+ reinterpret_cast<GLES2FunctionPointer>(glFlush),
+ },
+ {
+ "glFramebufferRenderbuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glFramebufferRenderbuffer),
+ },
+ {
+ "glFramebufferTexture2D",
+ reinterpret_cast<GLES2FunctionPointer>(glFramebufferTexture2D),
+ },
+ {
+ "glFrontFace",
+ reinterpret_cast<GLES2FunctionPointer>(glFrontFace),
+ },
+ {
+ "glGenBuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glGenBuffers),
+ },
+ {
+ "glGenerateMipmap",
+ reinterpret_cast<GLES2FunctionPointer>(glGenerateMipmap),
+ },
+ {
+ "glGenFramebuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glGenFramebuffers),
+ },
+ {
+ "glGenRenderbuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glGenRenderbuffers),
+ },
+ {
+ "glGenTextures",
+ reinterpret_cast<GLES2FunctionPointer>(glGenTextures),
+ },
+ {
+ "glGetActiveAttrib",
+ reinterpret_cast<GLES2FunctionPointer>(glGetActiveAttrib),
+ },
+ {
+ "glGetActiveUniform",
+ reinterpret_cast<GLES2FunctionPointer>(glGetActiveUniform),
+ },
+ {
+ "glGetAttachedShaders",
+ reinterpret_cast<GLES2FunctionPointer>(glGetAttachedShaders),
+ },
+ {
+ "glGetAttribLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glGetAttribLocation),
+ },
+ {
+ "glGetBooleanv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetBooleanv),
+ },
+ {
+ "glGetBufferParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetBufferParameteriv),
+ },
+ {
+ "glGetError",
+ reinterpret_cast<GLES2FunctionPointer>(glGetError),
+ },
+ {
+ "glGetFloatv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetFloatv),
+ },
+ {
+ "glGetFramebufferAttachmentParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glGetFramebufferAttachmentParameteriv),
+ },
+ {
+ "glGetIntegerv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetIntegerv),
+ },
+ {
+ "glGetProgramiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramiv),
+ },
+ {
+ "glGetProgramInfoLog",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramInfoLog),
+ },
+ {
+ "glGetRenderbufferParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetRenderbufferParameteriv),
+ },
+ {
+ "glGetShaderiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderiv),
+ },
+ {
+ "glGetShaderInfoLog",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderInfoLog),
+ },
+ {
+ "glGetShaderPrecisionFormat",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderPrecisionFormat),
+ },
+ {
+ "glGetShaderSource",
+ reinterpret_cast<GLES2FunctionPointer>(glGetShaderSource),
+ },
+ {
+ "glGetString",
+ reinterpret_cast<GLES2FunctionPointer>(glGetString),
+ },
+ {
+ "glGetTexParameterfv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetTexParameterfv),
+ },
+ {
+ "glGetTexParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetTexParameteriv),
+ },
+ {
+ "glGetUniformfv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetUniformfv),
+ },
+ {
+ "glGetUniformiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetUniformiv),
+ },
+ {
+ "glGetUniformLocation",
+ reinterpret_cast<GLES2FunctionPointer>(glGetUniformLocation),
+ },
+ {
+ "glGetVertexAttribfv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetVertexAttribfv),
+ },
+ {
+ "glGetVertexAttribiv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetVertexAttribiv),
+ },
+ {
+ "glGetVertexAttribPointerv",
+ reinterpret_cast<GLES2FunctionPointer>(glGetVertexAttribPointerv),
+ },
+ {
+ "glHint",
+ reinterpret_cast<GLES2FunctionPointer>(glHint),
+ },
+ {
+ "glIsBuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glIsBuffer),
+ },
+ {
+ "glIsEnabled",
+ reinterpret_cast<GLES2FunctionPointer>(glIsEnabled),
+ },
+ {
+ "glIsFramebuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glIsFramebuffer),
+ },
+ {
+ "glIsProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glIsProgram),
+ },
+ {
+ "glIsRenderbuffer",
+ reinterpret_cast<GLES2FunctionPointer>(glIsRenderbuffer),
+ },
+ {
+ "glIsShader",
+ reinterpret_cast<GLES2FunctionPointer>(glIsShader),
+ },
+ {
+ "glIsTexture",
+ reinterpret_cast<GLES2FunctionPointer>(glIsTexture),
+ },
+ {
+ "glLineWidth",
+ reinterpret_cast<GLES2FunctionPointer>(glLineWidth),
+ },
+ {
+ "glLinkProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glLinkProgram),
+ },
+ {
+ "glPixelStorei",
+ reinterpret_cast<GLES2FunctionPointer>(glPixelStorei),
+ },
+ {
+ "glPolygonOffset",
+ reinterpret_cast<GLES2FunctionPointer>(glPolygonOffset),
+ },
+ {
+ "glReadPixels",
+ reinterpret_cast<GLES2FunctionPointer>(glReadPixels),
+ },
+ {
+ "glReleaseShaderCompiler",
+ reinterpret_cast<GLES2FunctionPointer>(glReleaseShaderCompiler),
+ },
+ {
+ "glRenderbufferStorage",
+ reinterpret_cast<GLES2FunctionPointer>(glRenderbufferStorage),
+ },
+ {
+ "glSampleCoverage",
+ reinterpret_cast<GLES2FunctionPointer>(glSampleCoverage),
+ },
+ {
+ "glScissor",
+ reinterpret_cast<GLES2FunctionPointer>(glScissor),
+ },
+ {
+ "glShaderBinary",
+ reinterpret_cast<GLES2FunctionPointer>(glShaderBinary),
+ },
+ {
+ "glShaderSource",
+ reinterpret_cast<GLES2FunctionPointer>(glShaderSource),
+ },
+ {
+ "glShallowFinishCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glShallowFinishCHROMIUM),
+ },
+ {
+ "glShallowFlushCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glShallowFlushCHROMIUM),
+ },
+ {
+ "glStencilFunc",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilFunc),
+ },
+ {
+ "glStencilFuncSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilFuncSeparate),
+ },
+ {
+ "glStencilMask",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilMask),
+ },
+ {
+ "glStencilMaskSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilMaskSeparate),
+ },
+ {
+ "glStencilOp",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilOp),
+ },
+ {
+ "glStencilOpSeparate",
+ reinterpret_cast<GLES2FunctionPointer>(glStencilOpSeparate),
+ },
+ {
+ "glTexImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glTexImage2D),
+ },
+ {
+ "glTexParameterf",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameterf),
+ },
+ {
+ "glTexParameterfv",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameterfv),
+ },
+ {
+ "glTexParameteri",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameteri),
+ },
+ {
+ "glTexParameteriv",
+ reinterpret_cast<GLES2FunctionPointer>(glTexParameteriv),
+ },
+ {
+ "glTexSubImage2D",
+ reinterpret_cast<GLES2FunctionPointer>(glTexSubImage2D),
+ },
+ {
+ "glUniform1f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1f),
+ },
+ {
+ "glUniform1fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1fv),
+ },
+ {
+ "glUniform1i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1i),
+ },
+ {
+ "glUniform1iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform1iv),
+ },
+ {
+ "glUniform2f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2f),
+ },
+ {
+ "glUniform2fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2fv),
+ },
+ {
+ "glUniform2i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2i),
+ },
+ {
+ "glUniform2iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform2iv),
+ },
+ {
+ "glUniform3f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3f),
+ },
+ {
+ "glUniform3fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3fv),
+ },
+ {
+ "glUniform3i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3i),
+ },
+ {
+ "glUniform3iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform3iv),
+ },
+ {
+ "glUniform4f",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4f),
+ },
+ {
+ "glUniform4fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4fv),
+ },
+ {
+ "glUniform4i",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4i),
+ },
+ {
+ "glUniform4iv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniform4iv),
+ },
+ {
+ "glUniformMatrix2fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniformMatrix2fv),
+ },
+ {
+ "glUniformMatrix3fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniformMatrix3fv),
+ },
+ {
+ "glUniformMatrix4fv",
+ reinterpret_cast<GLES2FunctionPointer>(glUniformMatrix4fv),
+ },
+ {
+ "glUseProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glUseProgram),
+ },
+ {
+ "glValidateProgram",
+ reinterpret_cast<GLES2FunctionPointer>(glValidateProgram),
+ },
+ {
+ "glVertexAttrib1f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib1f),
+ },
+ {
+ "glVertexAttrib1fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib1fv),
+ },
+ {
+ "glVertexAttrib2f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib2f),
+ },
+ {
+ "glVertexAttrib2fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib2fv),
+ },
+ {
+ "glVertexAttrib3f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib3f),
+ },
+ {
+ "glVertexAttrib3fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib3fv),
+ },
+ {
+ "glVertexAttrib4f",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib4f),
+ },
+ {
+ "glVertexAttrib4fv",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttrib4fv),
+ },
+ {
+ "glVertexAttribPointer",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttribPointer),
+ },
+ {
+ "glViewport",
+ reinterpret_cast<GLES2FunctionPointer>(glViewport),
+ },
+ {
+ "glBlitFramebufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBlitFramebufferCHROMIUM),
+ },
+ {
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRenderbufferStorageMultisampleCHROMIUM),
+ },
+ {
+ "glRenderbufferStorageMultisampleEXT",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRenderbufferStorageMultisampleEXT),
+ },
+ {
+ "glFramebufferTexture2DMultisampleEXT",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glFramebufferTexture2DMultisampleEXT),
+ },
+ {
+ "glTexStorage2DEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glTexStorage2DEXT),
+ },
+ {
+ "glGenQueriesEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glGenQueriesEXT),
+ },
+ {
+ "glDeleteQueriesEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteQueriesEXT),
+ },
+ {
+ "glIsQueryEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glIsQueryEXT),
+ },
+ {
+ "glBeginQueryEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glBeginQueryEXT),
+ },
+ {
+ "glEndQueryEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glEndQueryEXT),
+ },
+ {
+ "glGetQueryivEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glGetQueryivEXT),
+ },
+ {
+ "glGetQueryObjectuivEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glGetQueryObjectuivEXT),
+ },
+ {
+ "glInsertEventMarkerEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glInsertEventMarkerEXT),
+ },
+ {
+ "glPushGroupMarkerEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glPushGroupMarkerEXT),
+ },
+ {
+ "glPopGroupMarkerEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glPopGroupMarkerEXT),
+ },
+ {
+ "glGenVertexArraysOES",
+ reinterpret_cast<GLES2FunctionPointer>(glGenVertexArraysOES),
+ },
+ {
+ "glDeleteVertexArraysOES",
+ reinterpret_cast<GLES2FunctionPointer>(glDeleteVertexArraysOES),
+ },
+ {
+ "glIsVertexArrayOES",
+ reinterpret_cast<GLES2FunctionPointer>(glIsVertexArrayOES),
+ },
+ {
+ "glBindVertexArrayOES",
+ reinterpret_cast<GLES2FunctionPointer>(glBindVertexArrayOES),
+ },
+ {
+ "glSwapBuffers",
+ reinterpret_cast<GLES2FunctionPointer>(glSwapBuffers),
+ },
+ {
+ "glGetMaxValueInBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetMaxValueInBufferCHROMIUM),
+ },
+ {
+ "glEnableFeatureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glEnableFeatureCHROMIUM),
+ },
+ {
+ "glMapBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapBufferCHROMIUM),
+ },
+ {
+ "glUnmapBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapBufferCHROMIUM),
+ },
+ {
+ "glMapImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapImageCHROMIUM),
+ },
+ {
+ "glUnmapImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapImageCHROMIUM),
+ },
+ {
+ "glMapBufferSubDataCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapBufferSubDataCHROMIUM),
+ },
+ {
+ "glUnmapBufferSubDataCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapBufferSubDataCHROMIUM),
+ },
+ {
+ "glMapTexSubImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMapTexSubImage2DCHROMIUM),
+ },
+ {
+ "glUnmapTexSubImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glUnmapTexSubImage2DCHROMIUM),
+ },
+ {
+ "glResizeCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glResizeCHROMIUM),
+ },
+ {
+ "glGetRequestableExtensionsCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetRequestableExtensionsCHROMIUM),
+ },
+ {
+ "glRequestExtensionCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glRequestExtensionCHROMIUM),
+ },
+ {
+ "glRateLimitOffscreenContextCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glRateLimitOffscreenContextCHROMIUM),
+ },
+ {
+ "glGetMultipleIntegervCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetMultipleIntegervCHROMIUM),
+ },
+ {
+ "glGetProgramInfoCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetProgramInfoCHROMIUM),
+ },
+ {
+ "glCreateStreamTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateStreamTextureCHROMIUM),
+ },
+ {
+ "glCreateImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateImageCHROMIUM),
+ },
+ {
+ "glDestroyImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glDestroyImageCHROMIUM),
+ },
+ {
+ "glGetImageParameterivCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGetImageParameterivCHROMIUM),
+ },
+ {
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glCreateGpuMemoryBufferImageCHROMIUM),
+ },
+ {
+ "glGetTranslatedShaderSourceANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glGetTranslatedShaderSourceANGLE),
+ },
+ {
+ "glPostSubBufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glPostSubBufferCHROMIUM),
+ },
+ {
+ "glTexImageIOSurface2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glTexImageIOSurface2DCHROMIUM),
+ },
+ {
+ "glCopyTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCopyTextureCHROMIUM),
+ },
+ {
+ "glDrawArraysInstancedANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawArraysInstancedANGLE),
+ },
+ {
+ "glDrawElementsInstancedANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawElementsInstancedANGLE),
+ },
+ {
+ "glVertexAttribDivisorANGLE",
+ reinterpret_cast<GLES2FunctionPointer>(glVertexAttribDivisorANGLE),
+ },
+ {
+ "glGenMailboxCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glGenMailboxCHROMIUM),
+ },
+ {
+ "glProduceTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glProduceTextureCHROMIUM),
+ },
+ {
+ "glProduceTextureDirectCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glProduceTextureDirectCHROMIUM),
+ },
+ {
+ "glConsumeTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glConsumeTextureCHROMIUM),
+ },
+ {
+ "glCreateAndConsumeTextureCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glCreateAndConsumeTextureCHROMIUM),
+ },
+ {
+ "glBindUniformLocationCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBindUniformLocationCHROMIUM),
+ },
+ {
+ "glBindTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glBindTexImage2DCHROMIUM),
+ },
+ {
+ "glReleaseTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glReleaseTexImage2DCHROMIUM),
+ },
+ {
+ "glTraceBeginCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glTraceBeginCHROMIUM),
+ },
+ {
+ "glTraceEndCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glTraceEndCHROMIUM),
+ },
+ {
+ "glAsyncTexSubImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glAsyncTexSubImage2DCHROMIUM),
+ },
+ {
+ "glAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glAsyncTexImage2DCHROMIUM),
+ },
+ {
+ "glWaitAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glWaitAsyncTexImage2DCHROMIUM),
+ },
+ {
+ "glWaitAllAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glWaitAllAsyncTexImage2DCHROMIUM),
+ },
+ {
+ "glDiscardFramebufferEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glDiscardFramebufferEXT),
+ },
+ {
+ "glLoseContextCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glLoseContextCHROMIUM),
+ },
+ {
+ "glInsertSyncPointCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glInsertSyncPointCHROMIUM),
+ },
+ {
+ "glWaitSyncPointCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glWaitSyncPointCHROMIUM),
+ },
+ {
+ "glDrawBuffersEXT",
+ reinterpret_cast<GLES2FunctionPointer>(glDrawBuffersEXT),
+ },
+ {
+ "glDiscardBackbufferCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glDiscardBackbufferCHROMIUM),
+ },
+ {
+ "glScheduleOverlayPlaneCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glScheduleOverlayPlaneCHROMIUM),
+ },
+ {
+ "glMatrixLoadfCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadfCHROMIUM),
+ },
+ {
+ "glMatrixLoadIdentityCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(glMatrixLoadIdentityCHROMIUM),
+ },
+ {
+ NULL,
+ NULL,
+ },
+};
+
+} // namespace gles2
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
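
The table completed above maps each exported GLES2 entry-point name to the address of its C wrapper and ends with a {NULL, NULL} sentinel. A minimal sketch of how such a sentinel-terminated table can be searched by name is shown below; the struct and function identifiers (FunctionInfo, LookUpFunction) are placeholders for illustration, not the names used in this file.

    #include <string.h>

    struct FunctionInfo {
      const char* name;             // Entry-point name, e.g. "glClear".
      GLES2FunctionPointer func;    // Address of the corresponding C wrapper.
    };

    // Walk the table linearly until the terminating {NULL, NULL} entry.
    GLES2FunctionPointer LookUpFunction(const FunctionInfo* table,
                                        const char* name) {
      for (const FunctionInfo* entry = table; entry->name; ++entry) {
        if (strcmp(entry->name, name) == 0)
          return entry->func;
      }
      return NULL;  // Name not present in the table.
    }
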
diff --git a/gpu/command_buffer/client/gles2_c_lib_export.h b/gpu/command_buffer/client/gles2_c_lib_export.h
new file mode 100644
index 0000000..ceacc6e
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_c_lib_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_EXPORT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GLES2_C_LIB_IMPLEMENTATION)
+#define GLES2_C_LIB_EXPORT __declspec(dllexport)
+#else
+#define GLES2_C_LIB_EXPORT __declspec(dllimport)
+#endif // defined(GLES2_C_LIB_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GLES2_C_LIB_IMPLEMENTATION)
+#define GLES2_C_LIB_EXPORT __attribute__((visibility("default")))
+#else
+#define GLES2_C_LIB_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GLES2_C_LIB_EXPORT
+#endif
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_EXPORT_H_
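
The export header above follows the usual component-build pattern: in a component (shared-library) build on Windows, GLES2_C_LIB_EXPORT expands to __declspec(dllexport) when GLES2_C_LIB_IMPLEMENTATION is defined and to __declspec(dllimport) otherwise; on other platforms it expands to __attribute__((visibility("default"))) when building the library and to nothing otherwise; in a static build it always expands to nothing. A hedged usage sketch follows; the function name is purely illustrative.

    #include "gpu/command_buffer/client/gles2_c_lib_export.h"

    // Exported from the gles2_c_lib component when GLES2_C_LIB_IMPLEMENTATION
    // is defined at compile time, imported by its clients otherwise.
    extern "C" GLES2_C_LIB_EXPORT void GLES2ExampleEntryPoint();
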
diff --git a/gpu/command_buffer/client/gles2_cmd_helper.cc b/gpu/command_buffer/client/gles2_cmd_helper.cc
new file mode 100644
index 0000000..d52970a
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_cmd_helper.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+
+namespace gpu {
+namespace gles2 {
+
+GLES2CmdHelper::GLES2CmdHelper(CommandBuffer* command_buffer)
+ : CommandBufferHelper(command_buffer) {
+}
+
+GLES2CmdHelper::~GLES2CmdHelper() {
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
+
diff --git a/gpu/command_buffer/client/gles2_cmd_helper.h b/gpu/command_buffer/client/gles2_cmd_helper.h
new file mode 100644
index 0000000..af6cc5d
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_cmd_helper.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_H_
+
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+// A class that helps write GL command buffers.
+class GPU_EXPORT GLES2CmdHelper : public CommandBufferHelper {
+ public:
+ explicit GLES2CmdHelper(CommandBuffer* command_buffer);
+ virtual ~GLES2CmdHelper();
+
+ // Include the auto-generated part of this class. We split this because it
+ // means we can easily edit the non-auto generated parts right here in this
+ // file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_cmd_helper_autogen.h"
+
+ // Helpers that could not be auto-generated.
+ // TODO(gman): Auto generate these.
+ void CreateAndConsumeTextureCHROMIUMImmediate(GLenum target,
+ uint32_t client_id,
+ const GLbyte* _mailbox) {
+ const uint32_t size =
+ gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::CreateAndConsumeTextureCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(target, client_id, _mailbox);
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GLES2CmdHelper);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_H_
+
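
Both the hand-written helper above and the auto-generated helpers in gles2_cmd_helper_autogen.h (next file) follow the same pattern: reserve space for the command in the ring buffer — GetCmdSpace<T>() for fixed-size commands, GetImmediateCmdSpaceTotalSize<T>(size) for variable-size "immediate" commands whose size comes from ComputeSize() — and, if the returned pointer is non-NULL, fill the command in place with Init(...). A NULL return silently drops the command (for example after context loss). Below is a hedged sketch of driving the helper directly; the ring-buffer size and the surrounding setup are assumptions for illustration, not the canonical initialization sequence.

    #include "gpu/command_buffer/client/gles2_cmd_helper.h"

    // Serializes a clear-to-red into the command buffer; nothing executes
    // until the service side processes the queued commands.
    void ClearToRed(gpu::CommandBuffer* command_buffer) {
      gpu::gles2::GLES2CmdHelper helper(command_buffer);
      helper.Initialize(16 * 1024);               // Assumed ring-buffer size.
      helper.ClearColor(1.0f, 0.0f, 0.0f, 1.0f);  // Uses GetCmdSpace<>().
      helper.Clear(0x00004000);                   // GL_COLOR_BUFFER_BIT.
      helper.Flush();                             // Advance the put pointer.
    }
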
diff --git a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
new file mode 100644
index 0000000..c8432da
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -0,0 +1,1932 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
+
+void ActiveTexture(GLenum texture) {
+ gles2::cmds::ActiveTexture* c = GetCmdSpace<gles2::cmds::ActiveTexture>();
+ if (c) {
+ c->Init(texture);
+ }
+}
+
+void AttachShader(GLuint program, GLuint shader) {
+ gles2::cmds::AttachShader* c = GetCmdSpace<gles2::cmds::AttachShader>();
+ if (c) {
+ c->Init(program, shader);
+ }
+}
+
+void BindAttribLocationBucket(GLuint program,
+ GLuint index,
+ uint32_t name_bucket_id) {
+ gles2::cmds::BindAttribLocationBucket* c =
+ GetCmdSpace<gles2::cmds::BindAttribLocationBucket>();
+ if (c) {
+ c->Init(program, index, name_bucket_id);
+ }
+}
+
+void BindBuffer(GLenum target, GLuint buffer) {
+ gles2::cmds::BindBuffer* c = GetCmdSpace<gles2::cmds::BindBuffer>();
+ if (c) {
+ c->Init(target, buffer);
+ }
+}
+
+void BindFramebuffer(GLenum target, GLuint framebuffer) {
+ gles2::cmds::BindFramebuffer* c = GetCmdSpace<gles2::cmds::BindFramebuffer>();
+ if (c) {
+ c->Init(target, framebuffer);
+ }
+}
+
+void BindRenderbuffer(GLenum target, GLuint renderbuffer) {
+ gles2::cmds::BindRenderbuffer* c =
+ GetCmdSpace<gles2::cmds::BindRenderbuffer>();
+ if (c) {
+ c->Init(target, renderbuffer);
+ }
+}
+
+void BindTexture(GLenum target, GLuint texture) {
+ gles2::cmds::BindTexture* c = GetCmdSpace<gles2::cmds::BindTexture>();
+ if (c) {
+ c->Init(target, texture);
+ }
+}
+
+void BlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) {
+ gles2::cmds::BlendColor* c = GetCmdSpace<gles2::cmds::BlendColor>();
+ if (c) {
+ c->Init(red, green, blue, alpha);
+ }
+}
+
+void BlendEquation(GLenum mode) {
+ gles2::cmds::BlendEquation* c = GetCmdSpace<gles2::cmds::BlendEquation>();
+ if (c) {
+ c->Init(mode);
+ }
+}
+
+void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) {
+ gles2::cmds::BlendEquationSeparate* c =
+ GetCmdSpace<gles2::cmds::BlendEquationSeparate>();
+ if (c) {
+ c->Init(modeRGB, modeAlpha);
+ }
+}
+
+void BlendFunc(GLenum sfactor, GLenum dfactor) {
+ gles2::cmds::BlendFunc* c = GetCmdSpace<gles2::cmds::BlendFunc>();
+ if (c) {
+ c->Init(sfactor, dfactor);
+ }
+}
+
+void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ gles2::cmds::BlendFuncSeparate* c =
+ GetCmdSpace<gles2::cmds::BlendFuncSeparate>();
+ if (c) {
+ c->Init(srcRGB, dstRGB, srcAlpha, dstAlpha);
+ }
+}
+
+void BufferData(GLenum target,
+ GLsizeiptr size,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset,
+ GLenum usage) {
+ gles2::cmds::BufferData* c = GetCmdSpace<gles2::cmds::BufferData>();
+ if (c) {
+ c->Init(target, size, data_shm_id, data_shm_offset, usage);
+ }
+}
+
+void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::BufferSubData* c = GetCmdSpace<gles2::cmds::BufferSubData>();
+ if (c) {
+ c->Init(target, offset, size, data_shm_id, data_shm_offset);
+ }
+}
+
+void CheckFramebufferStatus(GLenum target,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::CheckFramebufferStatus* c =
+ GetCmdSpace<gles2::cmds::CheckFramebufferStatus>();
+ if (c) {
+ c->Init(target, result_shm_id, result_shm_offset);
+ }
+}
+
+void Clear(GLbitfield mask) {
+ gles2::cmds::Clear* c = GetCmdSpace<gles2::cmds::Clear>();
+ if (c) {
+ c->Init(mask);
+ }
+}
+
+void ClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha) {
+ gles2::cmds::ClearColor* c = GetCmdSpace<gles2::cmds::ClearColor>();
+ if (c) {
+ c->Init(red, green, blue, alpha);
+ }
+}
+
+void ClearDepthf(GLclampf depth) {
+ gles2::cmds::ClearDepthf* c = GetCmdSpace<gles2::cmds::ClearDepthf>();
+ if (c) {
+ c->Init(depth);
+ }
+}
+
+void ClearStencil(GLint s) {
+ gles2::cmds::ClearStencil* c = GetCmdSpace<gles2::cmds::ClearStencil>();
+ if (c) {
+ c->Init(s);
+ }
+}
+
+void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ gles2::cmds::ColorMask* c = GetCmdSpace<gles2::cmds::ColorMask>();
+ if (c) {
+ c->Init(red, green, blue, alpha);
+ }
+}
+
+void CompileShader(GLuint shader) {
+ gles2::cmds::CompileShader* c = GetCmdSpace<gles2::cmds::CompileShader>();
+ if (c) {
+ c->Init(shader);
+ }
+}
+
+void CompressedTexImage2DBucket(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLuint bucket_id) {
+ gles2::cmds::CompressedTexImage2DBucket* c =
+ GetCmdSpace<gles2::cmds::CompressedTexImage2DBucket>();
+ if (c) {
+ c->Init(target, level, internalformat, width, height, bucket_id);
+ }
+}
+
+void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLsizei imageSize,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::CompressedTexImage2D* c =
+ GetCmdSpace<gles2::cmds::CompressedTexImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ internalformat,
+ width,
+ height,
+ imageSize,
+ data_shm_id,
+ data_shm_offset);
+ }
+}
+
+void CompressedTexSubImage2DBucket(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLuint bucket_id) {
+ gles2::cmds::CompressedTexSubImage2DBucket* c =
+ GetCmdSpace<gles2::cmds::CompressedTexSubImage2DBucket>();
+ if (c) {
+ c->Init(target, level, xoffset, yoffset, width, height, format, bucket_id);
+ }
+}
+
+void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::CompressedTexSubImage2D* c =
+ GetCmdSpace<gles2::cmds::CompressedTexSubImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ format,
+ imageSize,
+ data_shm_id,
+ data_shm_offset);
+ }
+}
+
+void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::CopyTexImage2D* c = GetCmdSpace<gles2::cmds::CopyTexImage2D>();
+ if (c) {
+ c->Init(target, level, internalformat, x, y, width, height);
+ }
+}
+
+void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::CopyTexSubImage2D* c =
+ GetCmdSpace<gles2::cmds::CopyTexSubImage2D>();
+ if (c) {
+ c->Init(target, level, xoffset, yoffset, x, y, width, height);
+ }
+}
+
+void CreateProgram(uint32_t client_id) {
+ gles2::cmds::CreateProgram* c = GetCmdSpace<gles2::cmds::CreateProgram>();
+ if (c) {
+ c->Init(client_id);
+ }
+}
+
+void CreateShader(GLenum type, uint32_t client_id) {
+ gles2::cmds::CreateShader* c = GetCmdSpace<gles2::cmds::CreateShader>();
+ if (c) {
+ c->Init(type, client_id);
+ }
+}
+
+void CullFace(GLenum mode) {
+ gles2::cmds::CullFace* c = GetCmdSpace<gles2::cmds::CullFace>();
+ if (c) {
+ c->Init(mode);
+ }
+}
+
+void DeleteBuffersImmediate(GLsizei n, const GLuint* buffers) {
+ const uint32_t size = gles2::cmds::DeleteBuffersImmediate::ComputeSize(n);
+ gles2::cmds::DeleteBuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteBuffersImmediate>(size);
+ if (c) {
+ c->Init(n, buffers);
+ }
+}
+
+void DeleteFramebuffersImmediate(GLsizei n, const GLuint* framebuffers) {
+ const uint32_t size =
+ gles2::cmds::DeleteFramebuffersImmediate::ComputeSize(n);
+ gles2::cmds::DeleteFramebuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteFramebuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, framebuffers);
+ }
+}
+
+void DeleteProgram(GLuint program) {
+ gles2::cmds::DeleteProgram* c = GetCmdSpace<gles2::cmds::DeleteProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void DeleteRenderbuffersImmediate(GLsizei n, const GLuint* renderbuffers) {
+ const uint32_t size =
+ gles2::cmds::DeleteRenderbuffersImmediate::ComputeSize(n);
+ gles2::cmds::DeleteRenderbuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteRenderbuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, renderbuffers);
+ }
+}
+
+void DeleteShader(GLuint shader) {
+ gles2::cmds::DeleteShader* c = GetCmdSpace<gles2::cmds::DeleteShader>();
+ if (c) {
+ c->Init(shader);
+ }
+}
+
+void DeleteTexturesImmediate(GLsizei n, const GLuint* textures) {
+ const uint32_t size = gles2::cmds::DeleteTexturesImmediate::ComputeSize(n);
+ gles2::cmds::DeleteTexturesImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteTexturesImmediate>(size);
+ if (c) {
+ c->Init(n, textures);
+ }
+}
+
+void DepthFunc(GLenum func) {
+ gles2::cmds::DepthFunc* c = GetCmdSpace<gles2::cmds::DepthFunc>();
+ if (c) {
+ c->Init(func);
+ }
+}
+
+void DepthMask(GLboolean flag) {
+ gles2::cmds::DepthMask* c = GetCmdSpace<gles2::cmds::DepthMask>();
+ if (c) {
+ c->Init(flag);
+ }
+}
+
+void DepthRangef(GLclampf zNear, GLclampf zFar) {
+ gles2::cmds::DepthRangef* c = GetCmdSpace<gles2::cmds::DepthRangef>();
+ if (c) {
+ c->Init(zNear, zFar);
+ }
+}
+
+void DetachShader(GLuint program, GLuint shader) {
+ gles2::cmds::DetachShader* c = GetCmdSpace<gles2::cmds::DetachShader>();
+ if (c) {
+ c->Init(program, shader);
+ }
+}
+
+void Disable(GLenum cap) {
+ gles2::cmds::Disable* c = GetCmdSpace<gles2::cmds::Disable>();
+ if (c) {
+ c->Init(cap);
+ }
+}
+
+void DisableVertexAttribArray(GLuint index) {
+ gles2::cmds::DisableVertexAttribArray* c =
+ GetCmdSpace<gles2::cmds::DisableVertexAttribArray>();
+ if (c) {
+ c->Init(index);
+ }
+}
+
+void DrawArrays(GLenum mode, GLint first, GLsizei count) {
+ gles2::cmds::DrawArrays* c = GetCmdSpace<gles2::cmds::DrawArrays>();
+ if (c) {
+ c->Init(mode, first, count);
+ }
+}
+
+void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ GLuint index_offset) {
+ gles2::cmds::DrawElements* c = GetCmdSpace<gles2::cmds::DrawElements>();
+ if (c) {
+ c->Init(mode, count, type, index_offset);
+ }
+}
+
+void Enable(GLenum cap) {
+ gles2::cmds::Enable* c = GetCmdSpace<gles2::cmds::Enable>();
+ if (c) {
+ c->Init(cap);
+ }
+}
+
+void EnableVertexAttribArray(GLuint index) {
+ gles2::cmds::EnableVertexAttribArray* c =
+ GetCmdSpace<gles2::cmds::EnableVertexAttribArray>();
+ if (c) {
+ c->Init(index);
+ }
+}
+
+void Finish() {
+ gles2::cmds::Finish* c = GetCmdSpace<gles2::cmds::Finish>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void Flush() {
+ gles2::cmds::Flush* c = GetCmdSpace<gles2::cmds::Flush>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ gles2::cmds::FramebufferRenderbuffer* c =
+ GetCmdSpace<gles2::cmds::FramebufferRenderbuffer>();
+ if (c) {
+ c->Init(target, attachment, renderbuffertarget, renderbuffer);
+ }
+}
+
+void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture) {
+ gles2::cmds::FramebufferTexture2D* c =
+ GetCmdSpace<gles2::cmds::FramebufferTexture2D>();
+ if (c) {
+ c->Init(target, attachment, textarget, texture);
+ }
+}
+
+void FrontFace(GLenum mode) {
+ gles2::cmds::FrontFace* c = GetCmdSpace<gles2::cmds::FrontFace>();
+ if (c) {
+ c->Init(mode);
+ }
+}
+
+void GenBuffersImmediate(GLsizei n, GLuint* buffers) {
+ const uint32_t size = gles2::cmds::GenBuffersImmediate::ComputeSize(n);
+ gles2::cmds::GenBuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenBuffersImmediate>(size);
+ if (c) {
+ c->Init(n, buffers);
+ }
+}
+
+void GenerateMipmap(GLenum target) {
+ gles2::cmds::GenerateMipmap* c = GetCmdSpace<gles2::cmds::GenerateMipmap>();
+ if (c) {
+ c->Init(target);
+ }
+}
+
+void GenFramebuffersImmediate(GLsizei n, GLuint* framebuffers) {
+ const uint32_t size = gles2::cmds::GenFramebuffersImmediate::ComputeSize(n);
+ gles2::cmds::GenFramebuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenFramebuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, framebuffers);
+ }
+}
+
+void GenRenderbuffersImmediate(GLsizei n, GLuint* renderbuffers) {
+ const uint32_t size = gles2::cmds::GenRenderbuffersImmediate::ComputeSize(n);
+ gles2::cmds::GenRenderbuffersImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenRenderbuffersImmediate>(
+ size);
+ if (c) {
+ c->Init(n, renderbuffers);
+ }
+}
+
+void GenTexturesImmediate(GLsizei n, GLuint* textures) {
+ const uint32_t size = gles2::cmds::GenTexturesImmediate::ComputeSize(n);
+ gles2::cmds::GenTexturesImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenTexturesImmediate>(size);
+ if (c) {
+ c->Init(n, textures);
+ }
+}
+
+void GetActiveAttrib(GLuint program,
+ GLuint index,
+ uint32_t name_bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetActiveAttrib* c = GetCmdSpace<gles2::cmds::GetActiveAttrib>();
+ if (c) {
+ c->Init(program, index, name_bucket_id, result_shm_id, result_shm_offset);
+ }
+}
+
+void GetActiveUniform(GLuint program,
+ GLuint index,
+ uint32_t name_bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetActiveUniform* c =
+ GetCmdSpace<gles2::cmds::GetActiveUniform>();
+ if (c) {
+ c->Init(program, index, name_bucket_id, result_shm_id, result_shm_offset);
+ }
+}
+
+void GetAttachedShaders(GLuint program,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset,
+ uint32_t result_size) {
+ gles2::cmds::GetAttachedShaders* c =
+ GetCmdSpace<gles2::cmds::GetAttachedShaders>();
+ if (c) {
+ c->Init(program, result_shm_id, result_shm_offset, result_size);
+ }
+}
+
+void GetAttribLocation(GLuint program,
+ uint32_t name_bucket_id,
+ uint32_t location_shm_id,
+ uint32_t location_shm_offset) {
+ gles2::cmds::GetAttribLocation* c =
+ GetCmdSpace<gles2::cmds::GetAttribLocation>();
+ if (c) {
+ c->Init(program, name_bucket_id, location_shm_id, location_shm_offset);
+ }
+}
+
+void GetBooleanv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetBooleanv* c = GetCmdSpace<gles2::cmds::GetBooleanv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetBufferParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetBufferParameteriv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetError(uint32_t result_shm_id, uint32_t result_shm_offset) {
+ gles2::cmds::GetError* c = GetCmdSpace<gles2::cmds::GetError>();
+ if (c) {
+ c->Init(result_shm_id, result_shm_offset);
+ }
+}
+
+void GetFloatv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetFloatv* c = GetCmdSpace<gles2::cmds::GetFloatv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetFramebufferAttachmentParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetFramebufferAttachmentParameteriv>();
+ if (c) {
+ c->Init(target, attachment, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetIntegerv(GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetIntegerv* c = GetCmdSpace<gles2::cmds::GetIntegerv>();
+ if (c) {
+ c->Init(pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetProgramiv(GLuint program,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetProgramiv* c = GetCmdSpace<gles2::cmds::GetProgramiv>();
+ if (c) {
+ c->Init(program, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetProgramInfoLog(GLuint program, uint32_t bucket_id) {
+ gles2::cmds::GetProgramInfoLog* c =
+ GetCmdSpace<gles2::cmds::GetProgramInfoLog>();
+ if (c) {
+ c->Init(program, bucket_id);
+ }
+}
+
+void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetRenderbufferParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetRenderbufferParameteriv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetShaderiv(GLuint shader,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetShaderiv* c = GetCmdSpace<gles2::cmds::GetShaderiv>();
+ if (c) {
+ c->Init(shader, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetShaderInfoLog(GLuint shader, uint32_t bucket_id) {
+ gles2::cmds::GetShaderInfoLog* c =
+ GetCmdSpace<gles2::cmds::GetShaderInfoLog>();
+ if (c) {
+ c->Init(shader, bucket_id);
+ }
+}
+
+void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetShaderPrecisionFormat* c =
+ GetCmdSpace<gles2::cmds::GetShaderPrecisionFormat>();
+ if (c) {
+ c->Init(shadertype, precisiontype, result_shm_id, result_shm_offset);
+ }
+}
+
+void GetShaderSource(GLuint shader, uint32_t bucket_id) {
+ gles2::cmds::GetShaderSource* c = GetCmdSpace<gles2::cmds::GetShaderSource>();
+ if (c) {
+ c->Init(shader, bucket_id);
+ }
+}
+
+void GetString(GLenum name, uint32_t bucket_id) {
+ gles2::cmds::GetString* c = GetCmdSpace<gles2::cmds::GetString>();
+ if (c) {
+ c->Init(name, bucket_id);
+ }
+}
+
+void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetTexParameterfv* c =
+ GetCmdSpace<gles2::cmds::GetTexParameterfv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetTexParameteriv* c =
+ GetCmdSpace<gles2::cmds::GetTexParameteriv>();
+ if (c) {
+ c->Init(target, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetUniformfv(GLuint program,
+ GLint location,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetUniformfv* c = GetCmdSpace<gles2::cmds::GetUniformfv>();
+ if (c) {
+ c->Init(program, location, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetUniformiv(GLuint program,
+ GLint location,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetUniformiv* c = GetCmdSpace<gles2::cmds::GetUniformiv>();
+ if (c) {
+ c->Init(program, location, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetUniformLocation(GLuint program,
+ uint32_t name_bucket_id,
+ uint32_t location_shm_id,
+ uint32_t location_shm_offset) {
+ gles2::cmds::GetUniformLocation* c =
+ GetCmdSpace<gles2::cmds::GetUniformLocation>();
+ if (c) {
+ c->Init(program, name_bucket_id, location_shm_id, location_shm_offset);
+ }
+}
+
+void GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetVertexAttribfv* c =
+ GetCmdSpace<gles2::cmds::GetVertexAttribfv>();
+ if (c) {
+ c->Init(index, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ uint32_t params_shm_id,
+ uint32_t params_shm_offset) {
+ gles2::cmds::GetVertexAttribiv* c =
+ GetCmdSpace<gles2::cmds::GetVertexAttribiv>();
+ if (c) {
+ c->Init(index, pname, params_shm_id, params_shm_offset);
+ }
+}
+
+void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ uint32_t pointer_shm_id,
+ uint32_t pointer_shm_offset) {
+ gles2::cmds::GetVertexAttribPointerv* c =
+ GetCmdSpace<gles2::cmds::GetVertexAttribPointerv>();
+ if (c) {
+ c->Init(index, pname, pointer_shm_id, pointer_shm_offset);
+ }
+}
+
+void Hint(GLenum target, GLenum mode) {
+ gles2::cmds::Hint* c = GetCmdSpace<gles2::cmds::Hint>();
+ if (c) {
+ c->Init(target, mode);
+ }
+}
+
+void IsBuffer(GLuint buffer,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsBuffer* c = GetCmdSpace<gles2::cmds::IsBuffer>();
+ if (c) {
+ c->Init(buffer, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsEnabled(GLenum cap, uint32_t result_shm_id, uint32_t result_shm_offset) {
+ gles2::cmds::IsEnabled* c = GetCmdSpace<gles2::cmds::IsEnabled>();
+ if (c) {
+ c->Init(cap, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsFramebuffer(GLuint framebuffer,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsFramebuffer* c = GetCmdSpace<gles2::cmds::IsFramebuffer>();
+ if (c) {
+ c->Init(framebuffer, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsProgram(GLuint program,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsProgram* c = GetCmdSpace<gles2::cmds::IsProgram>();
+ if (c) {
+ c->Init(program, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsRenderbuffer(GLuint renderbuffer,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsRenderbuffer* c = GetCmdSpace<gles2::cmds::IsRenderbuffer>();
+ if (c) {
+ c->Init(renderbuffer, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsShader(GLuint shader,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsShader* c = GetCmdSpace<gles2::cmds::IsShader>();
+ if (c) {
+ c->Init(shader, result_shm_id, result_shm_offset);
+ }
+}
+
+void IsTexture(GLuint texture,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsTexture* c = GetCmdSpace<gles2::cmds::IsTexture>();
+ if (c) {
+ c->Init(texture, result_shm_id, result_shm_offset);
+ }
+}
+
+void LineWidth(GLfloat width) {
+ gles2::cmds::LineWidth* c = GetCmdSpace<gles2::cmds::LineWidth>();
+ if (c) {
+ c->Init(width);
+ }
+}
+
+void LinkProgram(GLuint program) {
+ gles2::cmds::LinkProgram* c = GetCmdSpace<gles2::cmds::LinkProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void PixelStorei(GLenum pname, GLint param) {
+ gles2::cmds::PixelStorei* c = GetCmdSpace<gles2::cmds::PixelStorei>();
+ if (c) {
+ c->Init(pname, param);
+ }
+}
+
+void PolygonOffset(GLfloat factor, GLfloat units) {
+ gles2::cmds::PolygonOffset* c = GetCmdSpace<gles2::cmds::PolygonOffset>();
+ if (c) {
+ c->Init(factor, units);
+ }
+}
+
+void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset,
+ GLboolean async) {
+ gles2::cmds::ReadPixels* c = GetCmdSpace<gles2::cmds::ReadPixels>();
+ if (c) {
+ c->Init(x,
+ y,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset,
+ result_shm_id,
+ result_shm_offset,
+ async);
+ }
+}
+
+void ReleaseShaderCompiler() {
+ gles2::cmds::ReleaseShaderCompiler* c =
+ GetCmdSpace<gles2::cmds::ReleaseShaderCompiler>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorage* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorage>();
+ if (c) {
+ c->Init(target, internalformat, width, height);
+ }
+}
+
+void SampleCoverage(GLclampf value, GLboolean invert) {
+ gles2::cmds::SampleCoverage* c = GetCmdSpace<gles2::cmds::SampleCoverage>();
+ if (c) {
+ c->Init(value, invert);
+ }
+}
+
+void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::cmds::Scissor* c = GetCmdSpace<gles2::cmds::Scissor>();
+ if (c) {
+ c->Init(x, y, width, height);
+ }
+}
+
+void ShaderBinary(GLsizei n,
+ uint32_t shaders_shm_id,
+ uint32_t shaders_shm_offset,
+ GLenum binaryformat,
+ uint32_t binary_shm_id,
+ uint32_t binary_shm_offset,
+ GLsizei length) {
+ gles2::cmds::ShaderBinary* c = GetCmdSpace<gles2::cmds::ShaderBinary>();
+ if (c) {
+ c->Init(n,
+ shaders_shm_id,
+ shaders_shm_offset,
+ binaryformat,
+ binary_shm_id,
+ binary_shm_offset,
+ length);
+ }
+}
+
+void ShaderSourceBucket(GLuint shader, uint32_t data_bucket_id) {
+ gles2::cmds::ShaderSourceBucket* c =
+ GetCmdSpace<gles2::cmds::ShaderSourceBucket>();
+ if (c) {
+ c->Init(shader, data_bucket_id);
+ }
+}
+
+void StencilFunc(GLenum func, GLint ref, GLuint mask) {
+ gles2::cmds::StencilFunc* c = GetCmdSpace<gles2::cmds::StencilFunc>();
+ if (c) {
+ c->Init(func, ref, mask);
+ }
+}
+
+void StencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask) {
+ gles2::cmds::StencilFuncSeparate* c =
+ GetCmdSpace<gles2::cmds::StencilFuncSeparate>();
+ if (c) {
+ c->Init(face, func, ref, mask);
+ }
+}
+
+void StencilMask(GLuint mask) {
+ gles2::cmds::StencilMask* c = GetCmdSpace<gles2::cmds::StencilMask>();
+ if (c) {
+ c->Init(mask);
+ }
+}
+
+void StencilMaskSeparate(GLenum face, GLuint mask) {
+ gles2::cmds::StencilMaskSeparate* c =
+ GetCmdSpace<gles2::cmds::StencilMaskSeparate>();
+ if (c) {
+ c->Init(face, mask);
+ }
+}
+
+void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) {
+ gles2::cmds::StencilOp* c = GetCmdSpace<gles2::cmds::StencilOp>();
+ if (c) {
+ c->Init(fail, zfail, zpass);
+ }
+}
+
+void StencilOpSeparate(GLenum face, GLenum fail, GLenum zfail, GLenum zpass) {
+ gles2::cmds::StencilOpSeparate* c =
+ GetCmdSpace<gles2::cmds::StencilOpSeparate>();
+ if (c) {
+ c->Init(face, fail, zfail, zpass);
+ }
+}
+
+void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset) {
+ gles2::cmds::TexImage2D* c = GetCmdSpace<gles2::cmds::TexImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ internalformat,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset);
+ }
+}
+
+void TexParameterf(GLenum target, GLenum pname, GLfloat param) {
+ gles2::cmds::TexParameterf* c = GetCmdSpace<gles2::cmds::TexParameterf>();
+ if (c) {
+ c->Init(target, pname, param);
+ }
+}
+
+void TexParameterfvImmediate(GLenum target,
+ GLenum pname,
+ const GLfloat* params) {
+ const uint32_t size = gles2::cmds::TexParameterfvImmediate::ComputeSize();
+ gles2::cmds::TexParameterfvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::TexParameterfvImmediate>(size);
+ if (c) {
+ c->Init(target, pname, params);
+ }
+}
+
+void TexParameteri(GLenum target, GLenum pname, GLint param) {
+ gles2::cmds::TexParameteri* c = GetCmdSpace<gles2::cmds::TexParameteri>();
+ if (c) {
+ c->Init(target, pname, param);
+ }
+}
+
+void TexParameterivImmediate(GLenum target, GLenum pname, const GLint* params) {
+ const uint32_t size = gles2::cmds::TexParameterivImmediate::ComputeSize();
+ gles2::cmds::TexParameterivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::TexParameterivImmediate>(size);
+ if (c) {
+ c->Init(target, pname, params);
+ }
+}
+
+void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset,
+ GLboolean internal) {
+ gles2::cmds::TexSubImage2D* c = GetCmdSpace<gles2::cmds::TexSubImage2D>();
+ if (c) {
+ c->Init(target,
+ level,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset,
+ internal);
+ }
+}
+
+void Uniform1f(GLint location, GLfloat x) {
+ gles2::cmds::Uniform1f* c = GetCmdSpace<gles2::cmds::Uniform1f>();
+ if (c) {
+ c->Init(location, x);
+ }
+}
+
+void Uniform1fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform1fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform1fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform1fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform1i(GLint location, GLint x) {
+ gles2::cmds::Uniform1i* c = GetCmdSpace<gles2::cmds::Uniform1i>();
+ if (c) {
+ c->Init(location, x);
+ }
+}
+
+void Uniform1ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform1ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform1ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform1ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ gles2::cmds::Uniform2f* c = GetCmdSpace<gles2::cmds::Uniform2f>();
+ if (c) {
+ c->Init(location, x, y);
+ }
+}
+
+void Uniform2fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform2fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform2fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform2fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform2i(GLint location, GLint x, GLint y) {
+ gles2::cmds::Uniform2i* c = GetCmdSpace<gles2::cmds::Uniform2i>();
+ if (c) {
+ c->Init(location, x, y);
+ }
+}
+
+void Uniform2ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform2ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform2ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform2ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform3f(GLint location, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::cmds::Uniform3f* c = GetCmdSpace<gles2::cmds::Uniform3f>();
+ if (c) {
+ c->Init(location, x, y, z);
+ }
+}
+
+void Uniform3fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform3fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform3fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform3fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform3i(GLint location, GLint x, GLint y, GLint z) {
+ gles2::cmds::Uniform3i* c = GetCmdSpace<gles2::cmds::Uniform3i>();
+ if (c) {
+ c->Init(location, x, y, z);
+ }
+}
+
+void Uniform3ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform3ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform3ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform3ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform4f(GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
+ gles2::cmds::Uniform4f* c = GetCmdSpace<gles2::cmds::Uniform4f>();
+ if (c) {
+ c->Init(location, x, y, z, w);
+ }
+}
+
+void Uniform4fvImmediate(GLint location, GLsizei count, const GLfloat* v) {
+ const uint32_t size = gles2::cmds::Uniform4fvImmediate::ComputeSize(count);
+ gles2::cmds::Uniform4fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform4fvImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void Uniform4i(GLint location, GLint x, GLint y, GLint z, GLint w) {
+ gles2::cmds::Uniform4i* c = GetCmdSpace<gles2::cmds::Uniform4i>();
+ if (c) {
+ c->Init(location, x, y, z, w);
+ }
+}
+
+void Uniform4ivImmediate(GLint location, GLsizei count, const GLint* v) {
+ const uint32_t size = gles2::cmds::Uniform4ivImmediate::ComputeSize(count);
+ gles2::cmds::Uniform4ivImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::Uniform4ivImmediate>(size);
+ if (c) {
+ c->Init(location, count, v);
+ }
+}
+
+void UniformMatrix2fvImmediate(GLint location,
+ GLsizei count,
+ const GLfloat* value) {
+ const uint32_t size =
+ gles2::cmds::UniformMatrix2fvImmediate::ComputeSize(count);
+ gles2::cmds::UniformMatrix2fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::UniformMatrix2fvImmediate>(
+ size);
+ if (c) {
+ c->Init(location, count, value);
+ }
+}
+
+void UniformMatrix3fvImmediate(GLint location,
+ GLsizei count,
+ const GLfloat* value) {
+ const uint32_t size =
+ gles2::cmds::UniformMatrix3fvImmediate::ComputeSize(count);
+ gles2::cmds::UniformMatrix3fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::UniformMatrix3fvImmediate>(
+ size);
+ if (c) {
+ c->Init(location, count, value);
+ }
+}
+
+void UniformMatrix4fvImmediate(GLint location,
+ GLsizei count,
+ const GLfloat* value) {
+ const uint32_t size =
+ gles2::cmds::UniformMatrix4fvImmediate::ComputeSize(count);
+ gles2::cmds::UniformMatrix4fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::UniformMatrix4fvImmediate>(
+ size);
+ if (c) {
+ c->Init(location, count, value);
+ }
+}
+
+void UseProgram(GLuint program) {
+ gles2::cmds::UseProgram* c = GetCmdSpace<gles2::cmds::UseProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void ValidateProgram(GLuint program) {
+ gles2::cmds::ValidateProgram* c = GetCmdSpace<gles2::cmds::ValidateProgram>();
+ if (c) {
+ c->Init(program);
+ }
+}
+
+void VertexAttrib1f(GLuint indx, GLfloat x) {
+ gles2::cmds::VertexAttrib1f* c = GetCmdSpace<gles2::cmds::VertexAttrib1f>();
+ if (c) {
+ c->Init(indx, x);
+ }
+}
+
+void VertexAttrib1fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib1fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib1fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib1fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) {
+ gles2::cmds::VertexAttrib2f* c = GetCmdSpace<gles2::cmds::VertexAttrib2f>();
+ if (c) {
+ c->Init(indx, x, y);
+ }
+}
+
+void VertexAttrib2fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib2fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib2fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib2fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z) {
+ gles2::cmds::VertexAttrib3f* c = GetCmdSpace<gles2::cmds::VertexAttrib3f>();
+ if (c) {
+ c->Init(indx, x, y, z);
+ }
+}
+
+void VertexAttrib3fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib3fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib3fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib3fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttrib4f(GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
+ gles2::cmds::VertexAttrib4f* c = GetCmdSpace<gles2::cmds::VertexAttrib4f>();
+ if (c) {
+ c->Init(indx, x, y, z, w);
+ }
+}
+
+void VertexAttrib4fvImmediate(GLuint indx, const GLfloat* values) {
+ const uint32_t size = gles2::cmds::VertexAttrib4fvImmediate::ComputeSize();
+ gles2::cmds::VertexAttrib4fvImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::VertexAttrib4fvImmediate>(
+ size);
+ if (c) {
+ c->Init(indx, values);
+ }
+}
+
+void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ GLuint offset) {
+ gles2::cmds::VertexAttribPointer* c =
+ GetCmdSpace<gles2::cmds::VertexAttribPointer>();
+ if (c) {
+ c->Init(indx, size, type, normalized, stride, offset);
+ }
+}
+
+void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) {
+ gles2::cmds::Viewport* c = GetCmdSpace<gles2::cmds::Viewport>();
+ if (c) {
+ c->Init(x, y, width, height);
+ }
+}
+
+void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ gles2::cmds::BlitFramebufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::BlitFramebufferCHROMIUM>();
+ if (c) {
+ c->Init(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ }
+}
+
+void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorageMultisampleCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorageMultisampleCHROMIUM>();
+ if (c) {
+ c->Init(target, samples, internalformat, width, height);
+ }
+}
+
+void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::RenderbufferStorageMultisampleEXT* c =
+ GetCmdSpace<gles2::cmds::RenderbufferStorageMultisampleEXT>();
+ if (c) {
+ c->Init(target, samples, internalformat, width, height);
+ }
+}
+
+void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLsizei samples) {
+ gles2::cmds::FramebufferTexture2DMultisampleEXT* c =
+ GetCmdSpace<gles2::cmds::FramebufferTexture2DMultisampleEXT>();
+ if (c) {
+ c->Init(target, attachment, textarget, texture, samples);
+ }
+}
+
+void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ gles2::cmds::TexStorage2DEXT* c = GetCmdSpace<gles2::cmds::TexStorage2DEXT>();
+ if (c) {
+ c->Init(target, levels, internalFormat, width, height);
+ }
+}
+
+void GenQueriesEXTImmediate(GLsizei n, GLuint* queries) {
+ const uint32_t size = gles2::cmds::GenQueriesEXTImmediate::ComputeSize(n);
+ gles2::cmds::GenQueriesEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenQueriesEXTImmediate>(size);
+ if (c) {
+ c->Init(n, queries);
+ }
+}
+
+void DeleteQueriesEXTImmediate(GLsizei n, const GLuint* queries) {
+ const uint32_t size = gles2::cmds::DeleteQueriesEXTImmediate::ComputeSize(n);
+ gles2::cmds::DeleteQueriesEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DeleteQueriesEXTImmediate>(
+ size);
+ if (c) {
+ c->Init(n, queries);
+ }
+}
+
+void BeginQueryEXT(GLenum target,
+ GLuint id,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ gles2::cmds::BeginQueryEXT* c = GetCmdSpace<gles2::cmds::BeginQueryEXT>();
+ if (c) {
+ c->Init(target, id, sync_data_shm_id, sync_data_shm_offset);
+ }
+}
+
+void EndQueryEXT(GLenum target, GLuint submit_count) {
+ gles2::cmds::EndQueryEXT* c = GetCmdSpace<gles2::cmds::EndQueryEXT>();
+ if (c) {
+ c->Init(target, submit_count);
+ }
+}
+
+void InsertEventMarkerEXT(GLuint bucket_id) {
+ gles2::cmds::InsertEventMarkerEXT* c =
+ GetCmdSpace<gles2::cmds::InsertEventMarkerEXT>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void PushGroupMarkerEXT(GLuint bucket_id) {
+ gles2::cmds::PushGroupMarkerEXT* c =
+ GetCmdSpace<gles2::cmds::PushGroupMarkerEXT>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void PopGroupMarkerEXT() {
+ gles2::cmds::PopGroupMarkerEXT* c =
+ GetCmdSpace<gles2::cmds::PopGroupMarkerEXT>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void GenVertexArraysOESImmediate(GLsizei n, GLuint* arrays) {
+ const uint32_t size =
+ gles2::cmds::GenVertexArraysOESImmediate::ComputeSize(n);
+ gles2::cmds::GenVertexArraysOESImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::GenVertexArraysOESImmediate>(
+ size);
+ if (c) {
+ c->Init(n, arrays);
+ }
+}
+
+void DeleteVertexArraysOESImmediate(GLsizei n, const GLuint* arrays) {
+ const uint32_t size =
+ gles2::cmds::DeleteVertexArraysOESImmediate::ComputeSize(n);
+ gles2::cmds::DeleteVertexArraysOESImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::DeleteVertexArraysOESImmediate>(size);
+ if (c) {
+ c->Init(n, arrays);
+ }
+}
+
+void IsVertexArrayOES(GLuint array,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsVertexArrayOES* c =
+ GetCmdSpace<gles2::cmds::IsVertexArrayOES>();
+ if (c) {
+ c->Init(array, result_shm_id, result_shm_offset);
+ }
+}
+
+void BindVertexArrayOES(GLuint array) {
+ gles2::cmds::BindVertexArrayOES* c =
+ GetCmdSpace<gles2::cmds::BindVertexArrayOES>();
+ if (c) {
+ c->Init(array);
+ }
+}
+
+void SwapBuffers() {
+ gles2::cmds::SwapBuffers* c = GetCmdSpace<gles2::cmds::SwapBuffers>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::GetMaxValueInBufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetMaxValueInBufferCHROMIUM>();
+ if (c) {
+ c->Init(buffer_id, count, type, offset, result_shm_id, result_shm_offset);
+ }
+}
+
+void EnableFeatureCHROMIUM(GLuint bucket_id,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::EnableFeatureCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::EnableFeatureCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id, result_shm_id, result_shm_offset);
+ }
+}
+
+void ResizeCHROMIUM(GLuint width, GLuint height, GLfloat scale_factor) {
+ gles2::cmds::ResizeCHROMIUM* c = GetCmdSpace<gles2::cmds::ResizeCHROMIUM>();
+ if (c) {
+ c->Init(width, height, scale_factor);
+ }
+}
+
+void GetRequestableExtensionsCHROMIUM(uint32_t bucket_id) {
+ gles2::cmds::GetRequestableExtensionsCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetRequestableExtensionsCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void RequestExtensionCHROMIUM(uint32_t bucket_id) {
+ gles2::cmds::RequestExtensionCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::RequestExtensionCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void GetMultipleIntegervCHROMIUM(uint32_t pnames_shm_id,
+ uint32_t pnames_shm_offset,
+ GLuint count,
+ uint32_t results_shm_id,
+ uint32_t results_shm_offset,
+ GLsizeiptr size) {
+ gles2::cmds::GetMultipleIntegervCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetMultipleIntegervCHROMIUM>();
+ if (c) {
+ c->Init(pnames_shm_id,
+ pnames_shm_offset,
+ count,
+ results_shm_id,
+ results_shm_offset,
+ size);
+ }
+}
+
+void GetProgramInfoCHROMIUM(GLuint program, uint32_t bucket_id) {
+ gles2::cmds::GetProgramInfoCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::GetProgramInfoCHROMIUM>();
+ if (c) {
+ c->Init(program, bucket_id);
+ }
+}
+
+void GetTranslatedShaderSourceANGLE(GLuint shader, uint32_t bucket_id) {
+ gles2::cmds::GetTranslatedShaderSourceANGLE* c =
+ GetCmdSpace<gles2::cmds::GetTranslatedShaderSourceANGLE>();
+ if (c) {
+ c->Init(shader, bucket_id);
+ }
+}
+
+void PostSubBufferCHROMIUM(GLint x, GLint y, GLint width, GLint height) {
+ gles2::cmds::PostSubBufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::PostSubBufferCHROMIUM>();
+ if (c) {
+ c->Init(x, y, width, height);
+ }
+}
+
+void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ gles2::cmds::TexImageIOSurface2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::TexImageIOSurface2DCHROMIUM>();
+ if (c) {
+ c->Init(target, width, height, ioSurfaceId, plane);
+ }
+}
+
+void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ gles2::cmds::CopyTextureCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::CopyTextureCHROMIUM>();
+ if (c) {
+ c->Init(target, source_id, dest_id, level, internalformat, dest_type);
+ }
+}
+
+void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ gles2::cmds::DrawArraysInstancedANGLE* c =
+ GetCmdSpace<gles2::cmds::DrawArraysInstancedANGLE>();
+ if (c) {
+ c->Init(mode, first, count, primcount);
+ }
+}
+
+void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ GLuint index_offset,
+ GLsizei primcount) {
+ gles2::cmds::DrawElementsInstancedANGLE* c =
+ GetCmdSpace<gles2::cmds::DrawElementsInstancedANGLE>();
+ if (c) {
+ c->Init(mode, count, type, index_offset, primcount);
+ }
+}
+
+void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) {
+ gles2::cmds::VertexAttribDivisorANGLE* c =
+ GetCmdSpace<gles2::cmds::VertexAttribDivisorANGLE>();
+ if (c) {
+ c->Init(index, divisor);
+ }
+}
+
+void ProduceTextureCHROMIUMImmediate(GLenum target, const GLbyte* mailbox) {
+ const uint32_t size =
+ gles2::cmds::ProduceTextureCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::ProduceTextureCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::ProduceTextureCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(target, mailbox);
+ }
+}
+
+void ProduceTextureDirectCHROMIUMImmediate(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) {
+ const uint32_t size =
+ gles2::cmds::ProduceTextureDirectCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::ProduceTextureDirectCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::ProduceTextureDirectCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(texture, target, mailbox);
+ }
+}
+
+void ConsumeTextureCHROMIUMImmediate(GLenum target, const GLbyte* mailbox) {
+ const uint32_t size =
+ gles2::cmds::ConsumeTextureCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::ConsumeTextureCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::ConsumeTextureCHROMIUMImmediate>(size);
+ if (c) {
+ c->Init(target, mailbox);
+ }
+}
+
+void BindUniformLocationCHROMIUMBucket(GLuint program,
+ GLint location,
+ uint32_t name_bucket_id) {
+ gles2::cmds::BindUniformLocationCHROMIUMBucket* c =
+ GetCmdSpace<gles2::cmds::BindUniformLocationCHROMIUMBucket>();
+ if (c) {
+ c->Init(program, location, name_bucket_id);
+ }
+}
+
+void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::cmds::BindTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::BindTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target, imageId);
+ }
+}
+
+void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ gles2::cmds::ReleaseTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::ReleaseTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target, imageId);
+ }
+}
+
+void TraceBeginCHROMIUM(GLuint bucket_id) {
+ gles2::cmds::TraceBeginCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::TraceBeginCHROMIUM>();
+ if (c) {
+ c->Init(bucket_id);
+ }
+}
+
+void TraceEndCHROMIUM() {
+ gles2::cmds::TraceEndCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::TraceEndCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset,
+ uint32_t async_upload_token,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ gles2::cmds::AsyncTexSubImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::AsyncTexSubImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target,
+ level,
+ xoffset,
+ yoffset,
+ width,
+ height,
+ format,
+ type,
+ data_shm_id,
+ data_shm_offset,
+ async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ }
+}
+
+void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ uint32_t pixels_shm_id,
+ uint32_t pixels_shm_offset,
+ uint32_t async_upload_token,
+ uint32_t sync_data_shm_id,
+ uint32_t sync_data_shm_offset) {
+ gles2::cmds::AsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::AsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target,
+ level,
+ internalformat,
+ width,
+ height,
+ format,
+ type,
+ pixels_shm_id,
+ pixels_shm_offset,
+ async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ }
+}
+
+void WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ gles2::cmds::WaitAsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitAsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init(target);
+ }
+}
+
+void WaitAllAsyncTexImage2DCHROMIUM() {
+ gles2::cmds::WaitAllAsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitAllAsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void DiscardFramebufferEXTImmediate(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ const uint32_t size =
+ gles2::cmds::DiscardFramebufferEXTImmediate::ComputeSize(count);
+ gles2::cmds::DiscardFramebufferEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<
+ gles2::cmds::DiscardFramebufferEXTImmediate>(size);
+ if (c) {
+ c->Init(target, count, attachments);
+ }
+}
+
+void LoseContextCHROMIUM(GLenum current, GLenum other) {
+ gles2::cmds::LoseContextCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::LoseContextCHROMIUM>();
+ if (c) {
+ c->Init(current, other);
+ }
+}
+
+void WaitSyncPointCHROMIUM(GLuint sync_point) {
+ gles2::cmds::WaitSyncPointCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitSyncPointCHROMIUM>();
+ if (c) {
+ c->Init(sync_point);
+ }
+}
+
+void DrawBuffersEXTImmediate(GLsizei count, const GLenum* bufs) {
+ const uint32_t size =
+ gles2::cmds::DrawBuffersEXTImmediate::ComputeSize(count);
+ gles2::cmds::DrawBuffersEXTImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::DrawBuffersEXTImmediate>(size);
+ if (c) {
+ c->Init(count, bufs);
+ }
+}
+
+void DiscardBackbufferCHROMIUM() {
+ gles2::cmds::DiscardBackbufferCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::DiscardBackbufferCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
+void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ gles2::cmds::ScheduleOverlayPlaneCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::ScheduleOverlayPlaneCHROMIUM>();
+ if (c) {
+ c->Init(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+ }
+}
+
+void MatrixLoadfCHROMIUMImmediate(GLenum matrixMode, const GLfloat* m) {
+ const uint32_t size =
+ gles2::cmds::MatrixLoadfCHROMIUMImmediate::ComputeSize();
+ gles2::cmds::MatrixLoadfCHROMIUMImmediate* c =
+ GetImmediateCmdSpaceTotalSize<gles2::cmds::MatrixLoadfCHROMIUMImmediate>(
+ size);
+ if (c) {
+ c->Init(matrixMode, m);
+ }
+}
+
+void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ gles2::cmds::MatrixLoadIdentityCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::MatrixLoadIdentityCHROMIUM>();
+ if (c) {
+ c->Init(matrixMode);
+ }
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_impl_export.h b/gpu/command_buffer/client/gles2_impl_export.h
new file mode 100644
index 0000000..ee63565
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_impl_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPL_EXPORT_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPL_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(GLES2_IMPL_IMPLEMENTATION)
+#define GLES2_IMPL_EXPORT __declspec(dllexport)
+#else
+#define GLES2_IMPL_EXPORT __declspec(dllimport)
+#endif // defined(GLES2_IMPL_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(GLES2_IMPL_IMPLEMENTATION)
+#define GLES2_IMPL_EXPORT __attribute__((visibility("default")))
+#else
+#define GLES2_IMPL_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define GLES2_IMPL_EXPORT
+#endif
+
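+// Example usage (illustrative): annotate classes or functions that must be
+// visible across the component boundary, e.g.
+// class GLES2_IMPL_EXPORT GLES2Implementation { ... };
+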
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPL_EXPORT_H_
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
new file mode 100644
index 0000000..aabfa45
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -0,0 +1,4190 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A class to emulate GLES2 over command buffers.
+
+#include "gpu/command_buffer/client/gles2_implementation.h"
+
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+#include <algorithm>
+#include <limits>
+#include <map>
+#include <queue>
+#include <set>
+#include <sstream>
+#include <string>
+#include "base/bind.h"
+#include "gpu/command_buffer/client/buffer_tracker.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/client/vertex_array_object_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/trace_event.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+
+#if defined(__native_client__) && !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+#define GLES2_SUPPORT_CLIENT_SIDE_ARRAYS
+#endif
+
+#if defined(GPU_CLIENT_DEBUG)
+#include "base/command_line.h"
+#include "gpu/command_buffer/client/gpu_switches.h"
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+// A 32-bit and 64-bit compatible way of converting a pointer to a GLuint.
+static GLuint ToGLuint(const void* ptr) {
+ return static_cast<GLuint>(reinterpret_cast<size_t>(ptr));
+}
+
+#if !defined(_MSC_VER)
+const size_t GLES2Implementation::kMaxSizeOfSimpleResult;
+const unsigned int GLES2Implementation::kStartingOffset;
+#endif
+
+GLES2Implementation::GLStaticState::GLStaticState() {
+}
+
+GLES2Implementation::GLStaticState::~GLStaticState() {
+}
+
+GLES2Implementation::GLStaticState::IntState::IntState()
+ : max_combined_texture_image_units(0),
+ max_cube_map_texture_size(0),
+ max_fragment_uniform_vectors(0),
+ max_renderbuffer_size(0),
+ max_texture_image_units(0),
+ max_texture_size(0),
+ max_varying_vectors(0),
+ max_vertex_attribs(0),
+ max_vertex_texture_image_units(0),
+ max_vertex_uniform_vectors(0),
+ num_compressed_texture_formats(0),
+ num_shader_binary_formats(0),
+ bind_generates_resource_chromium(0) {}
+
+GLES2Implementation::SingleThreadChecker::SingleThreadChecker(
+ GLES2Implementation* gles2_implementation)
+ : gles2_implementation_(gles2_implementation) {
+ CHECK_EQ(0, gles2_implementation_->use_count_);
+ ++gles2_implementation_->use_count_;
+}
+
+GLES2Implementation::SingleThreadChecker::~SingleThreadChecker() {
+ --gles2_implementation_->use_count_;
+ CHECK_EQ(0, gles2_implementation_->use_count_);
+}
+
+GLES2Implementation::GLES2Implementation(
+ GLES2CmdHelper* helper,
+ ShareGroup* share_group,
+ TransferBufferInterface* transfer_buffer,
+ bool bind_generates_resource,
+ bool lose_context_when_out_of_memory,
+ GpuControl* gpu_control)
+ : helper_(helper),
+ transfer_buffer_(transfer_buffer),
+ angle_pack_reverse_row_order_status_(kUnknownExtensionStatus),
+ chromium_framebuffer_multisample_(kUnknownExtensionStatus),
+ pack_alignment_(4),
+ unpack_alignment_(4),
+ unpack_flip_y_(false),
+ unpack_row_length_(0),
+ unpack_skip_rows_(0),
+ unpack_skip_pixels_(0),
+ pack_reverse_row_order_(false),
+ active_texture_unit_(0),
+ bound_framebuffer_(0),
+ bound_read_framebuffer_(0),
+ bound_renderbuffer_(0),
+ current_program_(0),
+ bound_array_buffer_id_(0),
+ bound_pixel_pack_transfer_buffer_id_(0),
+ bound_pixel_unpack_transfer_buffer_id_(0),
+ async_upload_token_(0),
+ async_upload_sync_(NULL),
+ async_upload_sync_shm_id_(0),
+ async_upload_sync_shm_offset_(0),
+ error_bits_(0),
+ debug_(false),
+ lose_context_when_out_of_memory_(lose_context_when_out_of_memory),
+ use_count_(0),
+ error_message_callback_(NULL),
+ gpu_control_(gpu_control),
+ capabilities_(gpu_control->GetCapabilities()),
+ weak_ptr_factory_(this) {
+ DCHECK(helper);
+ DCHECK(transfer_buffer);
+ DCHECK(gpu_control);
+
+ std::stringstream ss;
+ ss << std::hex << this;
+ this_in_hex_ = ss.str();
+
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ debug_ = CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableGPUClientLogging);
+ });
+
+ share_group_ =
+ (share_group ? share_group : new ShareGroup(bind_generates_resource));
+ DCHECK(share_group_->bind_generates_resource() == bind_generates_resource);
+
+ memset(&reserved_ids_, 0, sizeof(reserved_ids_));
+}
+
+bool GLES2Implementation::Initialize(
+ unsigned int starting_transfer_buffer_size,
+ unsigned int min_transfer_buffer_size,
+ unsigned int max_transfer_buffer_size,
+ unsigned int mapped_memory_limit) {
+ TRACE_EVENT0("gpu", "GLES2Implementation::Initialize");
+ DCHECK_GE(starting_transfer_buffer_size, min_transfer_buffer_size);
+ DCHECK_LE(starting_transfer_buffer_size, max_transfer_buffer_size);
+ DCHECK_GE(min_transfer_buffer_size, kStartingOffset);
+
+ if (!transfer_buffer_->Initialize(
+ starting_transfer_buffer_size,
+ kStartingOffset,
+ min_transfer_buffer_size,
+ max_transfer_buffer_size,
+ kAlignment,
+ kSizeToFlush)) {
+ return false;
+ }
+
+ mapped_memory_.reset(
+ new MappedMemoryManager(
+ helper_,
+ base::Bind(&GLES2Implementation::PollAsyncUploads,
+ // The mapped memory manager is owned by |this| here, and
+ // since its destroyed before before we destroy ourselves
+ // we don't need extra safety measures for this closure.
+ base::Unretained(this)),
+ mapped_memory_limit));
+
+ unsigned chunk_size = 2 * 1024 * 1024;
+ if (mapped_memory_limit != kNoLimit) {
+ // Use smaller chunks if the client is very memory conscious.
+ chunk_size = std::min(mapped_memory_limit / 4, chunk_size);
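+ // For example, a 4 MB mapped memory limit results in 1 MB chunks instead of
+ // the 2 MB default.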
+ }
+ mapped_memory_->set_chunk_size_multiple(chunk_size);
+
+ if (!QueryAndCacheStaticState())
+ return false;
+
+ util_.set_num_compressed_texture_formats(
+ static_state_.int_state.num_compressed_texture_formats);
+ util_.set_num_shader_binary_formats(
+ static_state_.int_state.num_shader_binary_formats);
+
+ texture_units_.reset(
+ new TextureUnit[
+ static_state_.int_state.max_combined_texture_image_units]);
+
+ query_tracker_.reset(new QueryTracker(mapped_memory_.get()));
+ buffer_tracker_.reset(new BufferTracker(mapped_memory_.get()));
+ gpu_memory_buffer_tracker_.reset(new GpuMemoryBufferTracker(gpu_control_));
+
+ query_id_allocator_.reset(new IdAllocator());
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ GetIdHandler(id_namespaces::kBuffers)->MakeIds(
+ this, kClientSideArrayId, arraysize(reserved_ids_), &reserved_ids_[0]);
+#endif
+
+ vertex_array_object_manager_.reset(new VertexArrayObjectManager(
+ static_state_.int_state.max_vertex_attribs,
+ reserved_ids_[0],
+ reserved_ids_[1]));
+
+ // GL_BIND_GENERATES_RESOURCE_CHROMIUM state must be the same
+ // on Client & Service.
+ if (static_state_.int_state.bind_generates_resource_chromium !=
+ (share_group_->bind_generates_resource() ? 1 : 0)) {
+ SetGLError(GL_INVALID_OPERATION,
+ "Initialize",
+ "Service bind_generates_resource mismatch.");
+ return false;
+ }
+
+ return true;
+}
+
+bool GLES2Implementation::QueryAndCacheStaticState() {
+ TRACE_EVENT0("gpu", "GLES2Implementation::QueryAndCacheStaticState");
+ // Setup query for multiple GetIntegerv's
+ static const GLenum pnames[] = {
+ GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
+ GL_MAX_CUBE_MAP_TEXTURE_SIZE,
+ GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ GL_MAX_RENDERBUFFER_SIZE,
+ GL_MAX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_TEXTURE_SIZE,
+ GL_MAX_VARYING_VECTORS,
+ GL_MAX_VERTEX_ATTRIBS,
+ GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS,
+ GL_MAX_VERTEX_UNIFORM_VECTORS,
+ GL_NUM_COMPRESSED_TEXTURE_FORMATS,
+ GL_NUM_SHADER_BINARY_FORMATS,
+ GL_BIND_GENERATES_RESOURCE_CHROMIUM,
+ };
+
+ GetMultipleIntegervState integerv_state(
+ pnames, arraysize(pnames),
+ &static_state_.int_state.max_combined_texture_image_units,
+ sizeof(static_state_.int_state));
+ if (!GetMultipleIntegervSetup(&integerv_state)) {
+ return false;
+ }
+
+ // Setup query for multiple GetShaderPrecisionFormat's
+ static const GLenum precision_params[][2] = {
+ { GL_VERTEX_SHADER, GL_LOW_INT },
+ { GL_VERTEX_SHADER, GL_MEDIUM_INT },
+ { GL_VERTEX_SHADER, GL_HIGH_INT },
+ { GL_VERTEX_SHADER, GL_LOW_FLOAT },
+ { GL_VERTEX_SHADER, GL_MEDIUM_FLOAT },
+ { GL_VERTEX_SHADER, GL_HIGH_FLOAT },
+ { GL_FRAGMENT_SHADER, GL_LOW_INT },
+ { GL_FRAGMENT_SHADER, GL_MEDIUM_INT },
+ { GL_FRAGMENT_SHADER, GL_HIGH_INT },
+ { GL_FRAGMENT_SHADER, GL_LOW_FLOAT },
+ { GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT },
+ { GL_FRAGMENT_SHADER, GL_HIGH_FLOAT },
+ };
+
+ GetAllShaderPrecisionFormatsState precision_state(
+ precision_params, arraysize(precision_params));
+ GetAllShaderPrecisionFormatsSetup(&precision_state);
+
+ // Allocate and partition transfer buffer for all requests
+ void* buffer = transfer_buffer_->Alloc(
+ integerv_state.transfer_buffer_size_needed +
+ precision_state.transfer_buffer_size_needed);
+ if (!buffer) {
+ SetGLError(GL_OUT_OF_MEMORY, "QueryAndCacheStaticState",
+ "Transfer buffer allocation failed.");
+ return false;
+ }
+ integerv_state.buffer = buffer;
+ precision_state.results_buffer =
+ static_cast<char*>(buffer) + integerv_state.transfer_buffer_size_needed;
+
+ // Make all the requests and wait once for all the results.
+ GetMultipleIntegervRequest(&integerv_state);
+ GetAllShaderPrecisionFormatsRequest(&precision_state);
+ WaitForCmd();
+ GetMultipleIntegervOnCompleted(&integerv_state);
+ GetAllShaderPrecisionFormatsOnCompleted(&precision_state);
+
+ // TODO(gman): We should be able to free without a token.
+ transfer_buffer_->FreePendingToken(buffer, helper_->InsertToken());
+ CheckGLError();
+
+ return true;
+}
+
+GLES2Implementation::~GLES2Implementation() {
+ // Make sure the queries are finished; otherwise we'll delete the shared
+ // memory (mapped_memory_), which will free the memory used by the queries.
+ // The GPU process, when validating that the memory is still shared, will
+ // then fail and abort (i.e., it will stop running).
+ WaitForCmd();
+ query_tracker_.reset();
+
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ DeleteBuffers(arraysize(reserved_ids_), &reserved_ids_[0]);
+#endif
+
+ // Release any per-context data in share group.
+ share_group_->FreeContext(this);
+
+ buffer_tracker_.reset();
+
+ FreeAllAsyncUploadBuffers();
+
+ if (async_upload_sync_) {
+ mapped_memory_->Free(async_upload_sync_);
+ async_upload_sync_ = NULL;
+ }
+
+ // Make sure the commands make it to the service.
+ WaitForCmd();
+}
+
+GLES2CmdHelper* GLES2Implementation::helper() const {
+ return helper_;
+}
+
+IdHandlerInterface* GLES2Implementation::GetIdHandler(int namespace_id) const {
+ return share_group_->GetIdHandler(namespace_id);
+}
+
+IdAllocator* GLES2Implementation::GetIdAllocator(int namespace_id) const {
+ if (namespace_id == id_namespaces::kQueries)
+ return query_id_allocator_.get();
+ NOTREACHED();
+ return NULL;
+}
+
+void* GLES2Implementation::GetResultBuffer() {
+ return transfer_buffer_->GetResultBuffer();
+}
+
+int32 GLES2Implementation::GetResultShmId() {
+ return transfer_buffer_->GetShmId();
+}
+
+uint32 GLES2Implementation::GetResultShmOffset() {
+ return transfer_buffer_->GetResultOffset();
+}
+
+void GLES2Implementation::FreeUnusedSharedMemory() {
+ mapped_memory_->FreeUnused();
+}
+
+void GLES2Implementation::FreeEverything() {
+ FreeAllAsyncUploadBuffers();
+ WaitForCmd();
+ query_tracker_->Shrink();
+ FreeUnusedSharedMemory();
+ transfer_buffer_->Free();
+ helper_->FreeRingBuffer();
+}
+
+void GLES2Implementation::RunIfContextNotLost(const base::Closure& callback) {
+ if (!helper_->IsContextLost())
+ callback.Run();
+}
+
+void GLES2Implementation::SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) {
+ gpu_control_->SignalSyncPoint(
+ sync_point,
+ base::Bind(&GLES2Implementation::RunIfContextNotLost,
+ weak_ptr_factory_.GetWeakPtr(),
+ callback));
+}
+
+void GLES2Implementation::SignalQuery(uint32 query,
+ const base::Closure& callback) {
+ // Flush previously entered commands to ensure ordering with any
+ // glBeginQueryEXT() calls that may have been put into the context.
+ ShallowFlushCHROMIUM();
+ gpu_control_->SignalQuery(
+ query,
+ base::Bind(&GLES2Implementation::RunIfContextNotLost,
+ weak_ptr_factory_.GetWeakPtr(),
+ callback));
+}
+
+void GLES2Implementation::SetSurfaceVisible(bool visible) {
+ TRACE_EVENT1(
+ "gpu", "GLES2Implementation::SetSurfaceVisible", "visible", visible);
+ // TODO(piman): This probably should be ShallowFlushCHROMIUM().
+ Flush();
+ gpu_control_->SetSurfaceVisible(visible);
+ if (!visible)
+ FreeEverything();
+}
+
+void GLES2Implementation::WaitForCmd() {
+ TRACE_EVENT0("gpu", "GLES2::WaitForCmd");
+ helper_->CommandBufferHelper::Finish();
+}
+
+bool GLES2Implementation::IsExtensionAvailable(const char* ext) {
+ const char* extensions =
+ reinterpret_cast<const char*>(GetStringHelper(GL_EXTENSIONS));
+ if (!extensions)
+ return false;
+
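+ // Scan the space-delimited extension string token by token. An exact length
+ // match is required so that, for example, "GL_EXT_foo" does not match
+ // "GL_EXT_foobar".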
+ int length = strlen(ext);
+ while (true) {
+ int n = strcspn(extensions, " ");
+ if (n == length && 0 == strncmp(ext, extensions, length)) {
+ return true;
+ }
+ if ('\0' == extensions[n]) {
+ return false;
+ }
+ extensions += n + 1;
+ }
+}
+
+bool GLES2Implementation::IsExtensionAvailableHelper(
+ const char* extension, ExtensionStatus* status) {
+ switch (*status) {
+ case kAvailableExtensionStatus:
+ return true;
+ case kUnavailableExtensionStatus:
+ return false;
+ default: {
+ bool available = IsExtensionAvailable(extension);
+ *status = available ? kAvailableExtensionStatus :
+ kUnavailableExtensionStatus;
+ return available;
+ }
+ }
+}
+
+bool GLES2Implementation::IsAnglePackReverseRowOrderAvailable() {
+ return IsExtensionAvailableHelper(
+ "GL_ANGLE_pack_reverse_row_order",
+ &angle_pack_reverse_row_order_status_);
+}
+
+bool GLES2Implementation::IsChromiumFramebufferMultisampleAvailable() {
+ return IsExtensionAvailableHelper(
+ "GL_CHROMIUM_framebuffer_multisample",
+ &chromium_framebuffer_multisample_);
+}
+
+const std::string& GLES2Implementation::GetLogPrefix() const {
+ const std::string& prefix(debug_marker_manager_.GetMarker());
+ return prefix.empty() ? this_in_hex_ : prefix;
+}
+
+GLenum GLES2Implementation::GetError() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetError()");
+ GLenum err = GetGLError();
+ GPU_CLIENT_LOG("returned " << GLES2Util::GetStringError(err));
+ return err;
+}
+
+GLenum GLES2Implementation::GetClientSideGLError() {
+ if (error_bits_ == 0) {
+ return GL_NO_ERROR;
+ }
+
+ GLenum error = GL_NO_ERROR;
+ for (uint32 mask = 1; mask != 0; mask = mask << 1) {
+ if ((error_bits_ & mask) != 0) {
+ error = GLES2Util::GLErrorBitToGLError(mask);
+ break;
+ }
+ }
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ return error;
+}
+
+GLenum GLES2Implementation::GetGLError() {
+ TRACE_EVENT0("gpu", "GLES2::GetGLError");
+ // Check the GL error first, then our wrapped error.
+ typedef cmds::GetError::Result Result;
+ Result* result = GetResultAs<Result*>();
+ // If we couldn't allocate a result the context is lost.
+ if (!result) {
+ return GL_NO_ERROR;
+ }
+ *result = GL_NO_ERROR;
+ helper_->GetError(GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLenum error = *result;
+ if (error == GL_NO_ERROR) {
+ error = GetClientSideGLError();
+ } else {
+ // There was an error, clear the corresponding wrapped error.
+ error_bits_ &= ~GLES2Util::GLErrorToErrorBit(error);
+ }
+ return error;
+}
+
+#if defined(GL_CLIENT_FAIL_GL_ERRORS)
+void GLES2Implementation::FailGLError(GLenum error) {
+ if (error != GL_NO_ERROR) {
+ NOTREACHED() << "Error";
+ }
+}
+// NOTE: Calling GetGLError overwrites data in the result buffer.
+void GLES2Implementation::CheckGLError() {
+ FailGLError(GetGLError());
+}
+#endif // defined(GL_CLIENT_FAIL_GL_ERRORS)
+
+void GLES2Implementation::SetGLError(
+ GLenum error, const char* function_name, const char* msg) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] Client Synthesized Error: "
+ << GLES2Util::GetStringError(error) << ": "
+ << function_name << ": " << msg);
+ FailGLError(error);
+ if (msg) {
+ last_error_ = msg;
+ }
+ if (error_message_callback_) {
+ std::string temp(GLES2Util::GetStringError(error) + " : " +
+ function_name + ": " + (msg ? msg : ""));
+ error_message_callback_->OnErrorMessage(temp.c_str(), 0);
+ }
+ error_bits_ |= GLES2Util::GLErrorToErrorBit(error);
+
+ if (error == GL_OUT_OF_MEMORY && lose_context_when_out_of_memory_) {
+ helper_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_UNKNOWN_CONTEXT_RESET_ARB);
+ }
+}
+
+void GLES2Implementation::SetGLErrorInvalidEnum(
+ const char* function_name, GLenum value, const char* label) {
+ SetGLError(GL_INVALID_ENUM, function_name,
+ (std::string(label) + " was " +
+ GLES2Util::GetStringEnum(value)).c_str());
+}
+
+bool GLES2Implementation::GetBucketContents(uint32 bucket_id,
+ std::vector<int8>* data) {
+ TRACE_EVENT0("gpu", "GLES2::GetBucketContents");
+ DCHECK(data);
+ const uint32 kStartSize = 32 * 1024;
+ ScopedTransferBufferPtr buffer(kStartSize, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return false;
+ }
+ typedef cmd::GetBucketStart::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ *result = 0;
+ helper_->GetBucketStart(
+ bucket_id, GetResultShmId(), GetResultShmOffset(),
+ buffer.size(), buffer.shm_id(), buffer.offset());
+ WaitForCmd();
+ uint32 size = *result;
+ data->resize(size);
+ if (size > 0u) {
+ uint32 offset = 0;
+ while (size) {
+ if (!buffer.valid()) {
+ buffer.Reset(size);
+ if (!buffer.valid()) {
+ return false;
+ }
+ helper_->GetBucketData(
+ bucket_id, offset, buffer.size(), buffer.shm_id(), buffer.offset());
+ WaitForCmd();
+ }
+ uint32 size_to_copy = std::min(size, buffer.size());
+ memcpy(&(*data)[offset], buffer.address(), size_to_copy);
+ offset += size_to_copy;
+ size -= size_to_copy;
+ buffer.Release();
+ }
+ // Free the bucket. This is not required, but it does free up the memory,
+ // and we don't have to wait for the result, so from the client's
+ // perspective it's cheap.
+ helper_->SetBucketSize(bucket_id, 0);
+ }
+ return true;
+}
+
+void GLES2Implementation::SetBucketContents(
+ uint32 bucket_id, const void* data, size_t size) {
+ DCHECK(data);
+ helper_->SetBucketSize(bucket_id, size);
+ if (size > 0u) {
+ uint32 offset = 0;
+ while (size) {
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ memcpy(buffer.address(), static_cast<const int8*>(data) + offset,
+ buffer.size());
+ helper_->SetBucketData(
+ bucket_id, offset, buffer.size(), buffer.shm_id(), buffer.offset());
+ offset += buffer.size();
+ size -= buffer.size();
+ }
+ }
+}
+
+void GLES2Implementation::SetBucketAsCString(
+ uint32 bucket_id, const char* str) {
+ // NOTE: strings are passed NULL-terminated. That means the empty string
+ // will have a size of 1 and no string at all will have a size of 0.
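+ // For example, SetBucketAsCString(id, "") stores a single '\0' byte
+ // (size 1), while SetBucketAsCString(id, NULL) sets the bucket size to 0.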
+ if (str) {
+ SetBucketContents(bucket_id, str, strlen(str) + 1);
+ } else {
+ helper_->SetBucketSize(bucket_id, 0);
+ }
+}
+
+bool GLES2Implementation::GetBucketAsString(
+ uint32 bucket_id, std::string* str) {
+ DCHECK(str);
+ std::vector<int8> data;
+ // NOTE: strings are passed NULL-terminated. That means the empty string
+ // will have a size of 1 and no string at all will have a size of 0.
+ if (!GetBucketContents(bucket_id, &data)) {
+ return false;
+ }
+ if (data.empty()) {
+ return false;
+ }
+ str->assign(&data[0], &data[0] + data.size() - 1);
+ return true;
+}
+
+void GLES2Implementation::SetBucketAsString(
+ uint32 bucket_id, const std::string& str) {
+ // NOTE: strings are passed NULL-terminated. That means the empty string
+ // will have a size of 1 and no string at all will have a size of 0.
+ SetBucketContents(bucket_id, str.c_str(), str.size() + 1);
+}
+
+void GLES2Implementation::Disable(GLenum cap) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDisable("
+ << GLES2Util::GetStringCapability(cap) << ")");
+ bool changed = false;
+ if (!state_.SetCapabilityState(cap, false, &changed) || changed) {
+ helper_->Disable(cap);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::Enable(GLenum cap) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnable("
+ << GLES2Util::GetStringCapability(cap) << ")");
+ bool changed = false;
+ if (!state_.SetCapabilityState(cap, true, &changed) || changed) {
+ helper_->Enable(cap);
+ }
+ CheckGLError();
+}
+
+GLboolean GLES2Implementation::IsEnabled(GLenum cap) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnabled("
+ << GLES2Util::GetStringCapability(cap) << ")");
+ bool state = false;
+ if (!state_.GetEnabled(cap, &state)) {
+ typedef cmds::IsEnabled::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsEnabled(cap, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ state = (*result) != 0;
+ }
+
+ GPU_CLIENT_LOG("returned " << state);
+ CheckGLError();
+ return state;
+}
+
+bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS:
+ *params = static_state_.int_state.max_combined_texture_image_units;
+ return true;
+ case GL_MAX_CUBE_MAP_TEXTURE_SIZE:
+ *params = static_state_.int_state.max_cube_map_texture_size;
+ return true;
+ case GL_MAX_FRAGMENT_UNIFORM_VECTORS:
+ *params = static_state_.int_state.max_fragment_uniform_vectors;
+ return true;
+ case GL_MAX_RENDERBUFFER_SIZE:
+ *params = static_state_.int_state.max_renderbuffer_size;
+ return true;
+ case GL_MAX_TEXTURE_IMAGE_UNITS:
+ *params = static_state_.int_state.max_texture_image_units;
+ return true;
+ case GL_MAX_TEXTURE_SIZE:
+ *params = static_state_.int_state.max_texture_size;
+ return true;
+ case GL_MAX_VARYING_VECTORS:
+ *params = static_state_.int_state.max_varying_vectors;
+ return true;
+ case GL_MAX_VERTEX_ATTRIBS:
+ *params = static_state_.int_state.max_vertex_attribs;
+ return true;
+ case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS:
+ *params = static_state_.int_state.max_vertex_texture_image_units;
+ return true;
+ case GL_MAX_VERTEX_UNIFORM_VECTORS:
+ *params = static_state_.int_state.max_vertex_uniform_vectors;
+ return true;
+ case GL_NUM_COMPRESSED_TEXTURE_FORMATS:
+ *params = static_state_.int_state.num_compressed_texture_formats;
+ return true;
+ case GL_NUM_SHADER_BINARY_FORMATS:
+ *params = static_state_.int_state.num_shader_binary_formats;
+ return true;
+ case GL_ARRAY_BUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params = bound_array_buffer_id_;
+ return true;
+ }
+ return false;
+ case GL_ELEMENT_ARRAY_BUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params =
+ vertex_array_object_manager_->bound_element_array_buffer();
+ return true;
+ }
+ return false;
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_BINDING_CHROMIUM:
+ *params = bound_pixel_pack_transfer_buffer_id_;
+ return true;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_BINDING_CHROMIUM:
+ *params = bound_pixel_unpack_transfer_buffer_id_;
+ return true;
+ case GL_ACTIVE_TEXTURE:
+ *params = active_texture_unit_ + GL_TEXTURE0;
+ return true;
+ case GL_TEXTURE_BINDING_2D:
+ if (share_group_->bind_generates_resource()) {
+ *params = texture_units_[active_texture_unit_].bound_texture_2d;
+ return true;
+ }
+ return false;
+ case GL_TEXTURE_BINDING_CUBE_MAP:
+ if (share_group_->bind_generates_resource()) {
+ *params = texture_units_[active_texture_unit_].bound_texture_cube_map;
+ return true;
+ }
+ return false;
+ case GL_TEXTURE_BINDING_EXTERNAL_OES:
+ if (share_group_->bind_generates_resource()) {
+ *params =
+ texture_units_[active_texture_unit_].bound_texture_external_oes;
+ return true;
+ }
+ return false;
+ case GL_FRAMEBUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params = bound_framebuffer_;
+ return true;
+ }
+ return false;
+ case GL_READ_FRAMEBUFFER_BINDING:
+ if (IsChromiumFramebufferMultisampleAvailable() &&
+ share_group_->bind_generates_resource()) {
+ *params = bound_read_framebuffer_;
+ return true;
+ }
+ return false;
+ case GL_RENDERBUFFER_BINDING:
+ if (share_group_->bind_generates_resource()) {
+ *params = bound_renderbuffer_;
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool GLES2Implementation::GetBooleanvHelper(GLenum pname, GLboolean* params) {
+ // TODO(gman): Make this handle pnames that return more than 1 value.
+ GLint value;
+ if (!GetHelper(pname, &value)) {
+ return false;
+ }
+ *params = static_cast<GLboolean>(value);
+ return true;
+}
+
+bool GLES2Implementation::GetFloatvHelper(GLenum pname, GLfloat* params) {
+ // TODO(gman): Make this handle pnames that return more than 1 value.
+ GLint value;
+ if (!GetHelper(pname, &value)) {
+ return false;
+ }
+ *params = static_cast<GLfloat>(value);
+ return true;
+}
+
+bool GLES2Implementation::GetIntegervHelper(GLenum pname, GLint* params) {
+ return GetHelper(pname, params);
+}
+
+GLuint GLES2Implementation::GetMaxValueInBufferCHROMIUMHelper(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) {
+ typedef cmds::GetMaxValueInBufferCHROMIUM::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return 0;
+ }
+ *result = 0;
+ helper_->GetMaxValueInBufferCHROMIUM(
+ buffer_id, count, type, offset, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ return *result;
+}
+
+GLuint GLES2Implementation::GetMaxValueInBufferCHROMIUM(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetMaxValueInBufferCHROMIUM("
+ << buffer_id << ", " << count << ", "
+ << GLES2Util::GetStringGetMaxIndexType(type)
+ << ", " << offset << ")");
+ GLuint result = GetMaxValueInBufferCHROMIUMHelper(
+ buffer_id, count, type, offset);
+ GPU_CLIENT_LOG("returned " << result);
+ CheckGLError();
+ return result;
+}
+
+void GLES2Implementation::RestoreElementAndArrayBuffers(bool restore) {
+ if (restore) {
+ RestoreArrayBuffer(restore);
+ // Restore the element array binding.
+ // We only need to restore it if it wasn't a client side array.
+ if (vertex_array_object_manager_->bound_element_array_buffer() == 0) {
+ helper_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+ }
+ }
+}
+
+void GLES2Implementation::RestoreArrayBuffer(bool restore) {
+ if (restore) {
+ // Restore the user's current binding.
+ helper_->BindBuffer(GL_ARRAY_BUFFER, bound_array_buffer_id_);
+ }
+}
+
+void GLES2Implementation::DrawElements(
+ GLenum mode, GLsizei count, GLenum type, const void* indices) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawElements("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << count << ", "
+ << GLES2Util::GetStringIndexType(type) << ", "
+ << static_cast<const void*>(indices) << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawElements", "count less than 0.");
+ return;
+ }
+ if (count == 0) {
+ return;
+ }
+ if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
+ !ValidateOffset("glDrawElements", reinterpret_cast<GLintptr>(indices))) {
+ return;
+ }
+ GLuint offset = 0;
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
+ "glDrawElements", this, helper_, count, type, 0, indices,
+ &offset, &simulated)) {
+ return;
+ }
+ helper_->DrawElements(mode, count, type, offset);
+ RestoreElementAndArrayBuffers(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::Flush() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFlush()");
+ // Insert the cmd to call glFlush
+ helper_->Flush();
+ // Flush our command buffer
+ // (tell the service to execute up to the flush cmd.)
+ helper_->CommandBufferHelper::Flush();
+}
+
+void GLES2Implementation::ShallowFlushCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShallowFlushCHROMIUM()");
+ // Flush our command buffer
+ // (tell the service to execute up to the flush cmd.)
+ helper_->CommandBufferHelper::Flush();
+ // TODO(piman): Add the FreeEverything() logic here.
+}
+
+void GLES2Implementation::Finish() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ FinishHelper();
+}
+
+void GLES2Implementation::ShallowFinishCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2::ShallowFinishCHROMIUM");
+ // Flush our command buffer (tell the service to execute up to the flush cmd
+ // and don't return until it completes).
+ helper_->CommandBufferHelper::Finish();
+}
+
+void GLES2Implementation::FinishHelper() {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFinish()");
+ TRACE_EVENT0("gpu", "GLES2::Finish");
+ // Insert the cmd to call glFinish
+ helper_->Finish();
+ // Finish our command buffer
+ // (tell the service to execute up to the Finish cmd and wait for it to
+ // execute.)
+ helper_->CommandBufferHelper::Finish();
+}
+
+void GLES2Implementation::SwapBuffers() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSwapBuffers()");
+ // TODO(piman): Strictly speaking we'd want to insert the token after the
+ // swap, but the state update with the updated token might not have happened
+ // by the time the SwapBuffer callback gets called, forcing us to synchronize
+ // with the GPU process more than needed. So instead, make it happen before.
+ // All it means is that we could be slightly looser on the kMaxSwapBuffers
+ // semantics if the client doesn't use the callback mechanism, and by chance
+ // the scheduler yields between the InsertToken and the SwapBuffers.
+ swap_buffers_tokens_.push(helper_->InsertToken());
+ helper_->SwapBuffers();
+ helper_->CommandBufferHelper::Flush();
+ // Wait if we added too many swap buffers. Add 1 to kMaxSwapBuffers to
+ // compensate for TODO above.
+ if (swap_buffers_tokens_.size() > kMaxSwapBuffers + 1) {
+ helper_->WaitForToken(swap_buffers_tokens_.front());
+ swap_buffers_tokens_.pop();
+ }
+}
+
+void GLES2Implementation::BindAttribLocation(
+ GLuint program, GLuint index, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindAttribLocation("
+ << program << ", " << index << ", " << name << ")");
+ SetBucketAsString(kResultBucketId, name);
+ helper_->BindAttribLocationBucket(program, index, kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::BindUniformLocationCHROMIUM(
+ GLuint program, GLint location, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindUniformLocationCHROMIUM("
+ << program << ", " << location << ", " << name << ")");
+ SetBucketAsString(kResultBucketId, name);
+ helper_->BindUniformLocationCHROMIUMBucket(
+ program, location, kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetVertexAttribPointerv(
+ GLuint index, GLenum pname, void** ptr) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribPointer("
+ << index << ", " << GLES2Util::GetStringVertexPointer(pname) << ", "
+ << static_cast<void*>(ptr) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK(int32 num_results = 1);
+ if (!vertex_array_object_manager_->GetAttribPointer(index, pname, ptr)) {
+ TRACE_EVENT0("gpu", "GLES2::GetVertexAttribPointerv");
+ typedef cmds::GetVertexAttribPointerv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetVertexAttribPointerv(
+ index, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(ptr);
+ GPU_CLIENT_LOG_CODE_BLOCK(num_results = result->GetNumResults());
+ }
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < num_results; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << ptr[i]);
+ }
+ });
+ CheckGLError();
+}
+
+bool GLES2Implementation::DeleteProgramHelper(GLuint program) {
+ if (!GetIdHandler(id_namespaces::kProgramsAndShaders)->FreeIds(
+ this, 1, &program, &GLES2Implementation::DeleteProgramStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteProgram", "id not created by this context.");
+ return false;
+ }
+ if (program == current_program_) {
+ current_program_ = 0;
+ }
+ return true;
+}
+
+void GLES2Implementation::DeleteProgramStub(
+ GLsizei n, const GLuint* programs) {
+ DCHECK_EQ(1, n);
+ share_group_->program_info_manager()->DeleteInfo(programs[0]);
+ helper_->DeleteProgram(programs[0]);
+}
+
+bool GLES2Implementation::DeleteShaderHelper(GLuint shader) {
+ if (!GetIdHandler(id_namespaces::kProgramsAndShaders)->FreeIds(
+ this, 1, &shader, &GLES2Implementation::DeleteShaderStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteShader", "id not created by this context.");
+ return false;
+ }
+ return true;
+}
+
+void GLES2Implementation::DeleteShaderStub(
+ GLsizei n, const GLuint* shaders) {
+ DCHECK_EQ(1, n);
+ share_group_->program_info_manager()->DeleteInfo(shaders[0]);
+ helper_->DeleteShader(shaders[0]);
+}
+
+
+GLint GLES2Implementation::GetAttribLocationHelper(
+ GLuint program, const char* name) {
+ typedef cmds::GetAttribLocation::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return -1;
+ }
+ *result = -1;
+ SetBucketAsCString(kResultBucketId, name);
+ helper_->GetAttribLocation(
+ program, kResultBucketId, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ return *result;
+}
+
+GLint GLES2Implementation::GetAttribLocation(
+ GLuint program, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetAttribLocation(" << program
+ << ", " << name << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetAttribLocation");
+ GLint loc = share_group_->program_info_manager()->GetAttribLocation(
+ this, program, name);
+ GPU_CLIENT_LOG("returned " << loc);
+ CheckGLError();
+ return loc;
+}
+
+GLint GLES2Implementation::GetUniformLocationHelper(
+ GLuint program, const char* name) {
+ typedef cmds::GetUniformLocation::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return -1;
+ }
+ *result = -1;
+ SetBucketAsCString(kResultBucketId, name);
+ helper_->GetUniformLocation(program, kResultBucketId,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ return *result;
+}
+
+GLint GLES2Implementation::GetUniformLocation(
+ GLuint program, const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformLocation(" << program
+ << ", " << name << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetUniformLocation");
+ GLint loc = share_group_->program_info_manager()->GetUniformLocation(
+ this, program, name);
+ GPU_CLIENT_LOG("returned " << loc);
+ CheckGLError();
+ return loc;
+}
+
+bool GLES2Implementation::GetProgramivHelper(
+ GLuint program, GLenum pname, GLint* params) {
+ bool got_value = share_group_->program_info_manager()->GetProgramiv(
+ this, program, pname, params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ if (got_value) {
+ GPU_CLIENT_LOG(" 0: " << *params);
+ }
+ });
+ return got_value;
+}
+
+void GLES2Implementation::LinkProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLinkProgram(" << program << ")");
+ helper_->LinkProgram(program);
+ share_group_->program_info_manager()->CreateInfo(program);
+ CheckGLError();
+}
+
+void GLES2Implementation::ShaderBinary(
+ GLsizei n, const GLuint* shaders, GLenum binaryformat, const void* binary,
+ GLsizei length) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShaderBinary(" << n << ", "
+ << static_cast<const void*>(shaders) << ", "
+ << GLES2Util::GetStringEnum(binaryformat) << ", "
+ << static_cast<const void*>(binary) << ", "
+ << length << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderBinary", "n < 0.");
+ return;
+ }
+ if (length < 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderBinary", "length < 0.");
+ return;
+ }
+ // TODO(gman): ShaderBinary should use buckets.
+ unsigned int shader_id_size = n * sizeof(*shaders);
+ ScopedTransferBufferArray<GLint> buffer(
+ shader_id_size + length, helper_, transfer_buffer_);
+ if (!buffer.valid() || buffer.num_elements() != shader_id_size + length) {
+ SetGLError(GL_OUT_OF_MEMORY, "glShaderBinary", "out of memory.");
+ return;
+ }
+ void* shader_ids = buffer.elements();
+ void* shader_data = buffer.elements() + shader_id_size;
+ memcpy(shader_ids, shaders, shader_id_size);
+ memcpy(shader_data, binary, length);
+ helper_->ShaderBinary(
+ n,
+ buffer.shm_id(),
+ buffer.offset(),
+ binaryformat,
+ buffer.shm_id(),
+ buffer.offset() + shader_id_size,
+ length);
+ CheckGLError();
+}
+
+void GLES2Implementation::PixelStorei(GLenum pname, GLint param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPixelStorei("
+ << GLES2Util::GetStringPixelStore(pname) << ", "
+ << param << ")");
+ switch (pname) {
+ case GL_PACK_ALIGNMENT:
+ pack_alignment_ = param;
+ break;
+ case GL_UNPACK_ALIGNMENT:
+ unpack_alignment_ = param;
+ break;
+ case GL_UNPACK_ROW_LENGTH_EXT:
+ unpack_row_length_ = param;
+ return;
+ case GL_UNPACK_SKIP_ROWS_EXT:
+ unpack_skip_rows_ = param;
+ return;
+ case GL_UNPACK_SKIP_PIXELS_EXT:
+ unpack_skip_pixels_ = param;
+ return;
+ case GL_UNPACK_FLIP_Y_CHROMIUM:
+ unpack_flip_y_ = (param != 0);
+ break;
+ case GL_PACK_REVERSE_ROW_ORDER_ANGLE:
+ pack_reverse_row_order_ =
+ IsAnglePackReverseRowOrderAvailable() ? (param != 0) : false;
+ break;
+ default:
+ break;
+ }
+ helper_->PixelStorei(pname, param);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttribPointer(
+ GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride,
+ const void* ptr) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribPointer("
+ << index << ", "
+ << size << ", "
+ << GLES2Util::GetStringVertexAttribType(type) << ", "
+ << GLES2Util::GetStringBool(normalized) << ", "
+ << stride << ", "
+ << static_cast<const void*>(ptr) << ")");
+ // Record the info on the client side.
+ if (!vertex_array_object_manager_->SetAttribPointer(
+ bound_array_buffer_id_, index, size, type, normalized, stride, ptr)) {
+ SetGLError(GL_INVALID_OPERATION, "glVertexAttribPointer",
+ "client side arrays are not allowed in vertex array objects.");
+ return;
+ }
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ if (bound_array_buffer_id_ != 0) {
+ // Only report NON client side buffers to the service.
+ if (!ValidateOffset("glVertexAttribPointer",
+ reinterpret_cast<GLintptr>(ptr))) {
+ return;
+ }
+ helper_->VertexAttribPointer(index, size, type, normalized, stride,
+ ToGLuint(ptr));
+ }
+#else // !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ if (!ValidateOffset("glVertexAttribPointer",
+ reinterpret_cast<GLintptr>(ptr))) {
+ return;
+ }
+ helper_->VertexAttribPointer(index, size, type, normalized, stride,
+ ToGLuint(ptr));
+#endif // !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttribDivisorANGLE(
+ GLuint index, GLuint divisor) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttribDivisorANGLE("
+ << index << ", "
+ << divisor << ") ");
+ // Record the info on the client side.
+ vertex_array_object_manager_->SetAttribDivisor(index, divisor);
+ helper_->VertexAttribDivisorANGLE(index, divisor);
+ CheckGLError();
+}
+
+void GLES2Implementation::ShaderSource(
+ GLuint shader,
+ GLsizei count,
+ const GLchar* const* source,
+ const GLint* length) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glShaderSource("
+ << shader << ", " << count << ", "
+ << static_cast<const void*>(source) << ", "
+ << static_cast<const void*>(length) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (source[ii]) {
+ if (length && length[ii] >= 0) {
+ std::string str(source[ii], length[ii]);
+ GPU_CLIENT_LOG(" " << ii << ": ---\n" << str << "\n---");
+ } else {
+ GPU_CLIENT_LOG(" " << ii << ": ---\n" << source[ii] << "\n---");
+ }
+ } else {
+ GPU_CLIENT_LOG(" " << ii << ": NULL");
+ }
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderSource", "count < 0");
+ return;
+ }
+ if (shader == 0) {
+ SetGLError(GL_INVALID_VALUE, "glShaderSource", "shader == 0");
+ return;
+ }
+
+ // Compute the total size.
+ uint32 total_size = 1;
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (source[ii]) {
+ total_size += (length && length[ii] >= 0) ?
+ static_cast<size_t>(length[ii]) : strlen(source[ii]);
+ }
+ }
+
+ // Concatenate all the strings into a bucket on the service.
+ helper_->SetBucketSize(kResultBucketId, total_size);
+ uint32 offset = 0;
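+ // Note that the loop runs one extra iteration (ii == count) to append the
+ // terminating NUL accounted for by the extra byte in total_size.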
+ for (GLsizei ii = 0; ii <= count; ++ii) {
+ const char* src = ii < count ? source[ii] : "";
+ if (src) {
+ uint32 size = ii < count ?
+ ((length && length[ii] >= 0) ?
+ static_cast<size_t>(length[ii]) : strlen(src)) : 1;
+ while (size) {
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ memcpy(buffer.address(), src, buffer.size());
+ helper_->SetBucketData(kResultBucketId, offset, buffer.size(),
+ buffer.shm_id(), buffer.offset());
+ offset += buffer.size();
+ src += buffer.size();
+ size -= buffer.size();
+ }
+ }
+ }
+
+ DCHECK_EQ(total_size, offset);
+
+ helper_->ShaderSourceBucket(shader, kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::BufferDataHelper(
+ GLenum target, GLsizeiptr size, const void* data, GLenum usage) {
+ if (!ValidateSize("glBufferData", size))
+ return;
+
+ GLuint buffer_id;
+ if (GetBoundPixelTransferBuffer(target, "glBufferData", &buffer_id)) {
+ if (!buffer_id) {
+ return;
+ }
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (buffer)
+ RemoveTransferBuffer(buffer);
+
+ // Create new buffer.
+ buffer = buffer_tracker_->CreateBuffer(buffer_id, size);
+ DCHECK(buffer);
+ if (buffer->address() && data)
+ memcpy(buffer->address(), data, size);
+ return;
+ }
+
+ if (size == 0) {
+ return;
+ }
+
+ // If there is no data just send BufferData
+ if (!data) {
+ helper_->BufferData(target, size, 0, 0, usage);
+ return;
+ }
+
+ // See if we can send all at once.
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+
+ if (buffer.size() >= static_cast<unsigned int>(size)) {
+ memcpy(buffer.address(), data, size);
+ helper_->BufferData(
+ target,
+ size,
+ buffer.shm_id(),
+ buffer.offset(),
+ usage);
+ return;
+ }
+
+ // Make the buffer with BufferData then send via BufferSubData
+ helper_->BufferData(target, size, 0, 0, usage);
+ BufferSubDataHelperImpl(target, 0, size, data, &buffer);
+ CheckGLError();
+}
+
+void GLES2Implementation::BufferData(
+ GLenum target, GLsizeiptr size, const void* data, GLenum usage) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferData("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << size << ", "
+ << static_cast<const void*>(data) << ", "
+ << GLES2Util::GetStringBufferUsage(usage) << ")");
+ BufferDataHelper(target, size, data, usage);
+ CheckGLError();
+}
+
+void GLES2Implementation::BufferSubDataHelper(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data) {
+ if (size == 0) {
+ return;
+ }
+
+ if (!ValidateSize("glBufferSubData", size) ||
+ !ValidateOffset("glBufferSubData", offset)) {
+ return;
+ }
+
+ GLuint buffer_id;
+ if (GetBoundPixelTransferBuffer(target, "glBufferSubData", &buffer_id)) {
+ if (!buffer_id) {
+ return;
+ }
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_VALUE, "glBufferSubData", "unknown buffer");
+ return;
+ }
+
+ int32 end = 0;
+ int32 buffer_size = buffer->size();
+ if (!SafeAddInt32(offset, size, &end) || end > buffer_size) {
+ SetGLError(GL_INVALID_VALUE, "glBufferSubData", "out of range");
+ return;
+ }
+
+ if (buffer->address() && data)
+ memcpy(static_cast<uint8*>(buffer->address()) + offset, data, size);
+ return;
+ }
+
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ BufferSubDataHelperImpl(target, offset, size, data, &buffer);
+}
+
+void GLES2Implementation::BufferSubDataHelperImpl(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data,
+ ScopedTransferBufferPtr* buffer) {
+ DCHECK(buffer);
+ DCHECK_GT(size, 0);
+
+ const int8* source = static_cast<const int8*>(data);
+ while (size) {
+ if (!buffer->valid() || buffer->size() == 0) {
+ buffer->Reset(size);
+ if (!buffer->valid()) {
+ return;
+ }
+ }
+ memcpy(buffer->address(), source, buffer->size());
+ helper_->BufferSubData(
+ target, offset, buffer->size(), buffer->shm_id(), buffer->offset());
+ offset += buffer->size();
+ source += buffer->size();
+ size -= buffer->size();
+ buffer->Release();
+ }
+}
+
+void GLES2Implementation::BufferSubData(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferSubData("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << offset << ", " << size << ", "
+ << static_cast<const void*>(data) << ")");
+ BufferSubDataHelper(target, offset, size, data);
+ CheckGLError();
+}
+
+void GLES2Implementation::RemoveTransferBuffer(BufferTracker::Buffer* buffer) {
+ int32 token = buffer->last_usage_token();
+ uint32 async_token = buffer->last_async_upload_token();
+
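+ // The buffer's memory can only be reclaimed once the service is done with
+ // it: if an async upload or a command token is still pending, defer the
+ // free until that token has passed; otherwise free it immediately.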
+ if (async_token) {
+ if (HasAsyncUploadTokenPassed(async_token)) {
+ buffer_tracker_->Free(buffer);
+ } else {
+ detached_async_upload_memory_.push_back(
+ std::make_pair(buffer->address(), async_token));
+ buffer_tracker_->Unmanage(buffer);
+ }
+ } else if (token) {
+ if (helper_->HasTokenPassed(token))
+ buffer_tracker_->Free(buffer);
+ else
+ buffer_tracker_->FreePendingToken(buffer, token);
+ } else {
+ buffer_tracker_->Free(buffer);
+ }
+
+ buffer_tracker_->RemoveBuffer(buffer->id());
+}
+
+bool GLES2Implementation::GetBoundPixelTransferBuffer(
+ GLenum target,
+ const char* function_name,
+ GLuint* buffer_id) {
+ *buffer_id = 0;
+
+ switch (target) {
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
+ *buffer_id = bound_pixel_pack_transfer_buffer_id_;
+ break;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
+ *buffer_id = bound_pixel_unpack_transfer_buffer_id_;
+ break;
+ default:
+ // Unknown target
+ return false;
+ }
+ if (!*buffer_id) {
+ SetGLError(GL_INVALID_OPERATION, function_name, "no buffer bound");
+ }
+ return true;
+}
+
+BufferTracker::Buffer*
+GLES2Implementation::GetBoundPixelUnpackTransferBufferIfValid(
+ GLuint buffer_id,
+ const char* function_name,
+ GLuint offset, GLsizei size) {
+ DCHECK(buffer_id);
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_OPERATION, function_name, "invalid buffer");
+ return NULL;
+ }
+ if (buffer->mapped()) {
+ SetGLError(GL_INVALID_OPERATION, function_name, "buffer mapped");
+ return NULL;
+ }
+ if ((buffer->size() - offset) < static_cast<GLuint>(size)) {
+ SetGLError(GL_INVALID_VALUE, function_name, "unpack size to large");
+ return NULL;
+ }
+ return buffer;
+}
+
+void GLES2Implementation::CompressedTexImage2D(
+ GLenum target, GLint level, GLenum internalformat, GLsizei width,
+ GLsizei height, GLint border, GLsizei image_size, const void* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompressedTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << GLES2Util::GetStringCompressedTextureFormat(internalformat) << ", "
+ << width << ", " << height << ", " << border << ", "
+ << image_size << ", "
+ << static_cast<const void*>(data) << ")");
+ if (width < 0 || height < 0 || level < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCompressedTexImage2D", "dimension < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glCompressedTexImage2D", "border != 0");
+ return;
+ }
+ if (height == 0 || width == 0) {
+ return;
+ }
+ // If there's a pixel unpack buffer bound, use it when issuing
+ // CompressedTexImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(data);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glCompressedTexImage2D", offset, image_size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->CompressedTexImage2D(
+ target, level, internalformat, width, height, image_size,
+ buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ }
+ return;
+ }
+ SetBucketContents(kResultBucketId, data, image_size);
+ helper_->CompressedTexImage2DBucket(
+ target, level, internalformat, width, height, kResultBucketId);
+ // Free the bucket. This is not required, but it does free up the memory,
+ // and since we don't have to wait for the result it is cheap from the
+ // client's perspective.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+void GLES2Implementation::CompressedTexSubImage2D(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLsizei image_size, const void* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompressedTexSubImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringCompressedTextureFormat(format) << ", "
+ << image_size << ", "
+ << static_cast<const void*>(data) << ")");
+ if (width < 0 || height < 0 || level < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCompressedTexSubImage2D", "dimension < 0");
+ return;
+ }
+ // If there's a pixel unpack buffer bound, use it when issuing
+ // CompressedTexSubImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(data);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glCompressedTexSubImage2D", offset, image_size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->CompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, image_size,
+ buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ CheckGLError();
+ }
+ return;
+ }
+ SetBucketContents(kResultBucketId, data, image_size);
+ helper_->CompressedTexSubImage2DBucket(
+ target, level, xoffset, yoffset, width, height, format, kResultBucketId);
+ // Free the bucket. This is not required, but it does free up the memory,
+ // and since we don't have to wait for the result it is cheap from the
+ // client's perspective.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ CheckGLError();
+}
+
+namespace {
+
+void CopyRectToBuffer(
+ const void* pixels,
+ uint32 height,
+ uint32 unpadded_row_size,
+ uint32 pixels_padded_row_size,
+ bool flip_y,
+ void* buffer,
+ uint32 buffer_padded_row_size) {
+ const int8* source = static_cast<const int8*>(pixels);
+ int8* dest = static_cast<int8*>(buffer);
+ if (flip_y || pixels_padded_row_size != buffer_padded_row_size) {
+ if (flip_y) {
+ dest += buffer_padded_row_size * (height - 1);
+ }
+ // The last row is copied unpadded at the end.
+ for (; height > 1; --height) {
+ memcpy(dest, source, buffer_padded_row_size);
+ if (flip_y) {
+ dest -= buffer_padded_row_size;
+ } else {
+ dest += buffer_padded_row_size;
+ }
+ source += pixels_padded_row_size;
+ }
+ memcpy(dest, source, unpadded_row_size);
+ } else {
+ uint32 size = (height - 1) * pixels_padded_row_size + unpadded_row_size;
+ memcpy(dest, source, size);
+ }
+}
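+
+ // Editorial example (illustration only, not from the original change): with
+ // flip_y == false and equal padded row sizes the whole rect is copied with a
+ // single memcpy. With flip_y == true the rows are written bottom-up, e.g.
+ // for height == 3 source rows 0, 1, 2 land at dest rows 2, 1, 0, and only
+ // the last row written is copied without its padding.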
+
+} // anonymous namespace
+
+void GLES2Implementation::TexImage2D(
+ GLenum target, GLint level, GLint internalformat, GLsizei width,
+ GLsizei height, GLint border, GLenum format, GLenum type,
+ const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << width << ", " << height << ", " << border << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "dimension < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "border != 0");
+ return;
+ }
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "image size too large");
+ return;
+ }
+
+ // If there's a pixel unpack buffer bound, use it when issuing TexImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glTexImage2D", offset, size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ CheckGLError();
+ }
+ return;
+ }
+
+ // If there's no data, just issue TexImage2D.
+ if (!pixels) {
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ 0, 0);
+ CheckGLError();
+ return;
+ }
+
+ // Compute the number of bytes to advance per row in the src pixels.
+ uint32 src_padded_row_size;
+ if (unpack_row_length_ > 0) {
+ if (!GLES2Util::ComputeImagePaddedRowSize(
+ unpack_row_length_, format, type, unpack_alignment_,
+ &src_padded_row_size)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glTexImage2D", "unpack row length too large");
+ return;
+ }
+ } else {
+ src_padded_row_size = padded_row_size;
+ }
+
+ // Advance the pixels pointer past the skip rows and skip pixels.
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_rows_ * src_padded_row_size;
+ if (unpack_skip_pixels_) {
+ uint32 group_size = GLES2Util::ComputeImageGroupSize(format, type);
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_pixels_ * group_size;
+ }
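+
+ // Editorial example (hypothetical values): with GL_UNPACK_SKIP_ROWS == 2,
+ // GL_UNPACK_SKIP_PIXELS == 4, a src_padded_row_size of 256 bytes and an
+ // RGBA / UNSIGNED_BYTE group size of 4 bytes, the pointer above advances by
+ // 2 * 256 + 4 * 4 = 528 bytes before any data is copied.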
+
+ // Check if we can send it all at once.
+ ScopedTransferBufferPtr buffer(size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+
+ if (buffer.size() >= size) {
+ CopyRectToBuffer(
+ pixels, height, unpadded_row_size, src_padded_row_size, unpack_flip_y_,
+ buffer.address(), padded_row_size);
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ buffer.shm_id(), buffer.offset());
+ CheckGLError();
+ return;
+ }
+
+ // No, so send it using TexSubImage2D.
+ helper_->TexImage2D(
+ target, level, internalformat, width, height, format, type,
+ 0, 0);
+ TexSubImage2DImpl(
+ target, level, 0, 0, width, height, format, type, unpadded_row_size,
+ pixels, src_padded_row_size, GL_TRUE, &buffer, padded_row_size);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexSubImage2D(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexSubImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexSubImage2D", "dimension < 0");
+ return;
+ }
+ if (height == 0 || width == 0) {
+ return;
+ }
+
+ uint32 temp_size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &temp_size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glTexSubImage2D", "size to large");
+ return;
+ }
+
+ // If there's a pixel unpack buffer bound, use it when issuing TexSubImage2D.
+ if (bound_pixel_unpack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glTexSubImage2D", offset, temp_size);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->TexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset, false);
+ buffer->set_last_usage_token(helper_->InsertToken());
+ CheckGLError();
+ }
+ return;
+ }
+
+ // Compute the number of bytes to advance per row in the src pixels.
+ uint32 src_padded_row_size;
+ if (unpack_row_length_ > 0) {
+ if (!GLES2Util::ComputeImagePaddedRowSize(
+ unpack_row_length_, format, type, unpack_alignment_,
+ &src_padded_row_size)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glTexImage2D", "unpack row length too large");
+ return;
+ }
+ } else {
+ src_padded_row_size = padded_row_size;
+ }
+
+ // Advance the pixels pointer past the skip rows and skip pixels.
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_rows_ * src_padded_row_size;
+ if (unpack_skip_pixels_) {
+ uint32 group_size = GLES2Util::ComputeImageGroupSize(format, type);
+ pixels = reinterpret_cast<const int8*>(pixels) +
+ unpack_skip_pixels_ * group_size;
+ }
+
+ ScopedTransferBufferPtr buffer(temp_size, helper_, transfer_buffer_);
+ TexSubImage2DImpl(
+ target, level, xoffset, yoffset, width, height, format, type,
+ unpadded_row_size, pixels, src_padded_row_size, GL_FALSE, &buffer,
+ padded_row_size);
+ CheckGLError();
+}
+
+static GLint ComputeNumRowsThatFitInBuffer(
+ uint32 padded_row_size, uint32 unpadded_row_size,
+ unsigned int size) {
+ DCHECK_GE(unpadded_row_size, 0u);
+ if (padded_row_size == 0) {
+ return 1;
+ }
+ GLint num_rows = size / padded_row_size;
+ return num_rows + (size - num_rows * padded_row_size) / unpadded_row_size;
+}
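+
+ // Editorial example (hypothetical sizes): with padded_row_size == 12,
+ // unpadded_row_size == 9 and size == 45, 45 / 12 == 3 full padded rows fit
+ // and the remaining 45 - 36 == 9 bytes hold one more row, because the last
+ // row does not need its padding, so the function returns 4.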
+
+void GLES2Implementation::TexSubImage2DImpl(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, uint32 unpadded_row_size,
+ const void* pixels, uint32 pixels_padded_row_size, GLboolean internal,
+ ScopedTransferBufferPtr* buffer, uint32 buffer_padded_row_size) {
+ DCHECK(buffer);
+ DCHECK_GE(level, 0);
+ DCHECK_GT(height, 0);
+ DCHECK_GT(width, 0);
+
+ const int8* source = reinterpret_cast<const int8*>(pixels);
+ GLint original_yoffset = yoffset;
+ // Transfer by rows.
+ while (height) {
+ unsigned int desired_size =
+ buffer_padded_row_size * (height - 1) + unpadded_row_size;
+ if (!buffer->valid() || buffer->size() == 0) {
+ buffer->Reset(desired_size);
+ if (!buffer->valid()) {
+ return;
+ }
+ }
+
+ GLint num_rows = ComputeNumRowsThatFitInBuffer(
+ buffer_padded_row_size, unpadded_row_size, buffer->size());
+ num_rows = std::min(num_rows, height);
+ CopyRectToBuffer(
+ source, num_rows, unpadded_row_size, pixels_padded_row_size,
+ unpack_flip_y_, buffer->address(), buffer_padded_row_size);
+ GLint y = unpack_flip_y_ ? original_yoffset + height - num_rows : yoffset;
+ helper_->TexSubImage2D(
+ target, level, xoffset, y, width, num_rows, format, type,
+ buffer->shm_id(), buffer->offset(), internal);
+ buffer->Release();
+ yoffset += num_rows;
+ source += num_rows * pixels_padded_row_size;
+ height -= num_rows;
+ }
+}
+
+bool GLES2Implementation::GetActiveAttribHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ typedef cmds::GetActiveAttrib::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ // Set as failed so if the command fails we'll recover.
+ result->success = false;
+ helper_->GetActiveAttrib(program, index, kResultBucketId,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ if (result->success) {
+ if (size) {
+ *size = result->size;
+ }
+ if (type) {
+ *type = result->type;
+ }
+ if (length || name) {
+ std::vector<int8> str;
+ GetBucketContents(kResultBucketId, &str);
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ str.size() - 1));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, &str[0], max_size);
+ name[max_size] = '\0';
+ }
+ }
+ }
+ return result->success != 0;
+}
+
+void GLES2Implementation::GetActiveAttrib(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetActiveAttrib("
+ << program << ", " << index << ", " << bufsize << ", "
+ << static_cast<const void*>(length) << ", "
+ << static_cast<const void*>(size) << ", "
+ << static_cast<const void*>(type) << ", "
+ << static_cast<const void*>(name) << ", ");
+ if (bufsize < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetActiveAttrib", "bufsize < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetActiveAttrib");
+ bool success = share_group_->program_info_manager()->GetActiveAttrib(
+ this, program, index, bufsize, length, size, type, name);
+ if (success) {
+ if (size) {
+ GPU_CLIENT_LOG(" size: " << *size);
+ }
+ if (type) {
+ GPU_CLIENT_LOG(" type: " << GLES2Util::GetStringEnum(*type));
+ }
+ if (name) {
+ GPU_CLIENT_LOG(" name: " << name);
+ }
+ }
+ CheckGLError();
+}
+
+bool GLES2Implementation::GetActiveUniformHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ typedef cmds::GetActiveUniform::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ // Set as failed so if the command fails we'll recover.
+ result->success = false;
+ helper_->GetActiveUniform(program, index, kResultBucketId,
+ GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ if (result->success) {
+ if (size) {
+ *size = result->size;
+ }
+ if (type) {
+ *type = result->type;
+ }
+ if (length || name) {
+ std::vector<int8> str;
+ GetBucketContents(kResultBucketId, &str);
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ str.size() - 1));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, &str[0], max_size);
+ name[max_size] = '\0';
+ }
+ }
+ }
+ return result->success != 0;
+}
+
+void GLES2Implementation::GetActiveUniform(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size,
+ GLenum* type, char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetActiveUniform("
+ << program << ", " << index << ", " << bufsize << ", "
+ << static_cast<const void*>(length) << ", "
+ << static_cast<const void*>(size) << ", "
+ << static_cast<const void*>(type) << ", "
+ << static_cast<const void*>(name) << ", ");
+ if (bufsize < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetActiveUniform", "bufsize < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetActiveUniform");
+ bool success = share_group_->program_info_manager()->GetActiveUniform(
+ this, program, index, bufsize, length, size, type, name);
+ if (success) {
+ if (size) {
+ GPU_CLIENT_LOG(" size: " << *size);
+ }
+ if (type) {
+ GPU_CLIENT_LOG(" type: " << GLES2Util::GetStringEnum(*type));
+ }
+ if (name) {
+ GPU_CLIENT_LOG(" name: " << name);
+ }
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::GetAttachedShaders(
+ GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetAttachedShaders("
+ << program << ", " << maxcount << ", "
+ << static_cast<const void*>(count) << ", "
+ << static_cast<const void*>(shaders) << ")");
+ if (maxcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGetAttachedShaders", "maxcount < 0");
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetAttachedShaders");
+ typedef cmds::GetAttachedShaders::Result Result;
+ uint32 size = Result::ComputeSize(maxcount);
+ Result* result = static_cast<Result*>(transfer_buffer_->Alloc(size));
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetAttachedShaders(
+ program,
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(result),
+ size);
+ int32 token = helper_->InsertToken();
+ WaitForCmd();
+ if (count) {
+ *count = result->GetNumResults();
+ }
+ result->CopyResult(shaders);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ transfer_buffer_->FreePendingToken(result, token);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetShaderPrecisionFormat(
+ GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderPrecisionFormat("
+ << GLES2Util::GetStringShaderType(shadertype) << ", "
+ << GLES2Util::GetStringShaderPrecision(precisiontype) << ", "
+ << static_cast<const void*>(range) << ", "
+ << static_cast<const void*>(precision) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetShaderPrecisionFormat");
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+
+ GLStaticState::ShaderPrecisionKey key(shadertype, precisiontype);
+ GLStaticState::ShaderPrecisionMap::iterator i =
+ static_state_.shader_precisions.find(key);
+ if (i != static_state_.shader_precisions.end()) {
+ *result = i->second;
+ } else {
+ result->success = false;
+ helper_->GetShaderPrecisionFormat(
+ shadertype, precisiontype, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ if (result->success)
+ static_state_.shader_precisions[key] = *result;
+ }
+
+ if (result->success) {
+ if (range) {
+ range[0] = result->min_range;
+ range[1] = result->max_range;
+ GPU_CLIENT_LOG(" min_range: " << range[0]);
+ GPU_CLIENT_LOG(" min_range: " << range[1]);
+ }
+ if (precision) {
+ precision[0] = result->precision;
+ GPU_CLIENT_LOG(" min_range: " << precision[0]);
+ }
+ }
+ CheckGLError();
+}
+
+const GLubyte* GLES2Implementation::GetStringHelper(GLenum name) {
+ const char* result = NULL;
+ // Clears the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetString(name, kResultBucketId);
+ std::string str;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ // Adds extensions implemented on client side only.
+ switch (name) {
+ case GL_EXTENSIONS:
+ str += std::string(str.empty() ? "" : " ") +
+ "GL_CHROMIUM_flipy "
+ "GL_EXT_unpack_subimage "
+ "GL_CHROMIUM_map_sub";
+ if (capabilities_.image)
+ str += " GL_CHROMIUM_image GL_CHROMIUM_gpu_memory_buffer_image";
+ if (capabilities_.future_sync_points)
+ str += " GL_CHROMIUM_future_sync_point";
+ break;
+ default:
+ break;
+ }
+
+ // Because of WebGL the extension string can change. We have to cache each
+ // unique result since we don't know when the client will stop referring to
+ // a previously returned string.
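+ // For example (editorial illustration): a WebGL page may query
+ // glGetString(GL_EXTENSIONS), request an extension via
+ // glRequestExtensionCHROMIUM and query it again; both the old and the new
+ // strings are kept alive in |gl_strings_|, so any pointer the page still
+ // holds from the first query stays valid.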
+ GLStringMap::iterator it = gl_strings_.find(name);
+ if (it == gl_strings_.end()) {
+ std::set<std::string> strings;
+ std::pair<GLStringMap::iterator, bool> insert_result =
+ gl_strings_.insert(std::make_pair(name, strings));
+ DCHECK(insert_result.second);
+ it = insert_result.first;
+ }
+ std::set<std::string>& string_set = it->second;
+ std::set<std::string>::const_iterator sit = string_set.find(str);
+ if (sit != string_set.end()) {
+ result = sit->c_str();
+ } else {
+ std::pair<std::set<std::string>::const_iterator, bool> insert_result =
+ string_set.insert(str);
+ DCHECK(insert_result.second);
+ result = insert_result.first->c_str();
+ }
+ }
+ return reinterpret_cast<const GLubyte*>(result);
+}
+
+const GLubyte* GLES2Implementation::GetString(GLenum name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetString("
+ << GLES2Util::GetStringStringType(name) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetString");
+ const GLubyte* result = GetStringHelper(name);
+ GPU_CLIENT_LOG(" returned " << reinterpret_cast<const char*>(result));
+ CheckGLError();
+ return result;
+}
+
+void GLES2Implementation::GetUniformfv(
+ GLuint program, GLint location, GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformfv("
+ << program << ", " << location << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetUniformfv");
+ typedef cmds::GetUniformfv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetUniformfv(
+ program, location, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GetUniformiv(
+ GLuint program, GLint location, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetUniformiv("
+ << program << ", " << location << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GetUniformiv");
+ typedef cmds::GetUniformiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetUniformiv(
+ program, location, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::ReadPixels(
+ GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format,
+ GLenum type, void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glReadPixels("
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringReadPixelFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (width < 0 || height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions < 0");
+ return;
+ }
+ if (width == 0 || height == 0) {
+ return;
+ }
+
+ // glReadPixels pads each row of pixels to the alignment specified by
+ // glPixelStorei(GL_PACK_ALIGNMENT). We have to take that into account both
+ // because the rows returned from the ReadPixels command include that
+ // padding, and because when we copy the results into the user's buffer we
+ // must not write those padding bytes but leave them as they are.
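+ //
+ // Editorial example (hypothetical parameters): reading a width of 3 pixels
+ // as GL_RGB / GL_UNSIGNED_BYTE with GL_PACK_ALIGNMENT == 4 gives an unpadded
+ // row of 9 bytes and a padded row of 12 bytes, so the last 3 bytes of each
+ // padded row are skipped when copying into the caller's memory.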
+
+ TRACE_EVENT0("gpu", "GLES2::ReadPixels");
+ typedef cmds::ReadPixels::Result Result;
+
+ int8* dest = reinterpret_cast<int8*>(pixels);
+ uint32 temp_size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, 2, format, type, pack_alignment_, &temp_size, &unpadded_row_size,
+ &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glReadPixels", "size too large.");
+ return;
+ }
+
+ if (bound_pixel_pack_transfer_buffer_id_) {
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_pack_transfer_buffer_id_,
+ "glReadPixels", offset, padded_row_size * height);
+ if (buffer && buffer->shm_id() != -1) {
+ helper_->ReadPixels(xoffset, yoffset, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset(),
+ 0, 0, true);
+ CheckGLError();
+ }
+ return;
+ }
+
+ if (!pixels) {
+ SetGLError(GL_INVALID_OPERATION, "glReadPixels", "pixels = NULL");
+ return;
+ }
+
+ // Transfer by rows, copying as many rows as fit in the transfer buffer on
+ // each iteration.
+ while (height) {
+ GLsizei desired_size = padded_row_size * (height - 1) + unpadded_row_size;
+ ScopedTransferBufferPtr buffer(desired_size, helper_, transfer_buffer_);
+ if (!buffer.valid()) {
+ return;
+ }
+ GLint num_rows = ComputeNumRowsThatFitInBuffer(
+ padded_row_size, unpadded_row_size, buffer.size());
+ num_rows = std::min(num_rows, height);
+ // NOTE: We must look up the address of the result area AFTER allocation
+ // of the transfer buffer since the transfer buffer may be reallocated.
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ *result = 0; // mark as failed.
+ helper_->ReadPixels(
+ xoffset, yoffset, width, num_rows, format, type,
+ buffer.shm_id(), buffer.offset(),
+ GetResultShmId(), GetResultShmOffset(),
+ false);
+ WaitForCmd();
+ if (*result != 0) {
+ // When doing a y-flip we have to iterate through top-to-bottom chunks
+ // of the dst. The service side handles reversing the rows within a
+ // chunk.
+ int8* rows_dst;
+ if (pack_reverse_row_order_) {
+ rows_dst = dest + (height - num_rows) * padded_row_size;
+ } else {
+ rows_dst = dest;
+ }
+ // We have to copy 1 row at a time to avoid writing pad bytes.
+ const int8* src = static_cast<const int8*>(buffer.address());
+ for (GLint yy = 0; yy < num_rows; ++yy) {
+ memcpy(rows_dst, src, unpadded_row_size);
+ rows_dst += padded_row_size;
+ src += padded_row_size;
+ }
+ if (!pack_reverse_row_order_) {
+ dest = rows_dst;
+ }
+ }
+ // If it was not marked as successful, exit.
+ if (*result == 0) {
+ return;
+ }
+ yoffset += num_rows;
+ height -= num_rows;
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::ActiveTexture(GLenum texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glActiveTexture("
+ << GLES2Util::GetStringEnum(texture) << ")");
+ GLuint texture_index = texture - GL_TEXTURE0;
+ if (texture_index >= static_cast<GLuint>(
+ static_state_.int_state.max_combined_texture_image_units)) {
+ SetGLErrorInvalidEnum(
+ "glActiveTexture", texture, "texture");
+ return;
+ }
+
+ active_texture_unit_ = texture_index;
+ helper_->ActiveTexture(texture);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenBuffersHelper(
+ GLsizei /* n */, const GLuint* /* buffers */) {
+}
+
+void GLES2Implementation::GenFramebuffersHelper(
+ GLsizei /* n */, const GLuint* /* framebuffers */) {
+}
+
+void GLES2Implementation::GenRenderbuffersHelper(
+ GLsizei /* n */, const GLuint* /* renderbuffers */) {
+}
+
+void GLES2Implementation::GenTexturesHelper(
+ GLsizei /* n */, const GLuint* /* textures */) {
+}
+
+void GLES2Implementation::GenVertexArraysOESHelper(
+ GLsizei n, const GLuint* arrays) {
+ vertex_array_object_manager_->GenVertexArrays(n, arrays);
+}
+
+void GLES2Implementation::GenQueriesEXTHelper(
+ GLsizei /* n */, const GLuint* /* queries */) {
+}
+
+ // NOTE #1: On old versions of OpenGL, calling glBindXXX with an unused id
+ // generates a new resource. On newer versions of OpenGL it does not. The
+ // code related to binding below will need to change if we switch to the new
+ // OpenGL model. Specifically, it assumes a bind will succeed, which is
+ // always true in the old model but possibly not in the new model if another
+ // context has deleted the resource.
+
+bool GLES2Implementation::BindBufferHelper(
+ GLenum target, GLuint buffer_id) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ switch (target) {
+ case GL_ARRAY_BUFFER:
+ if (bound_array_buffer_id_ != buffer_id) {
+ bound_array_buffer_id_ = buffer_id;
+ changed = true;
+ }
+ break;
+ case GL_ELEMENT_ARRAY_BUFFER:
+ changed = vertex_array_object_manager_->BindElementArray(buffer_id);
+ break;
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
+ bound_pixel_pack_transfer_buffer_id_ = buffer_id;
+ break;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
+ bound_pixel_unpack_transfer_buffer_id_ = buffer_id;
+ break;
+ default:
+ changed = true;
+ break;
+ }
+ // TODO(gman): There's a bug here. If the target is invalid the ID will not
+ // be used even though it's marked as used here.
+ GetIdHandler(id_namespaces::kBuffers)->MarkAsUsedForBind(buffer_id);
+ return changed;
+}
+
+bool GLES2Implementation::BindFramebufferHelper(
+ GLenum target, GLuint framebuffer) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ switch (target) {
+ case GL_FRAMEBUFFER:
+ if (bound_framebuffer_ != framebuffer ||
+ bound_read_framebuffer_ != framebuffer) {
+ bound_framebuffer_ = framebuffer;
+ bound_read_framebuffer_ = framebuffer;
+ changed = true;
+ }
+ break;
+ case GL_READ_FRAMEBUFFER:
+ if (!IsChromiumFramebufferMultisampleAvailable()) {
+ SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
+ return false;
+ }
+ if (bound_read_framebuffer_ != framebuffer) {
+ bound_read_framebuffer_ = framebuffer;
+ changed = true;
+ }
+ break;
+ case GL_DRAW_FRAMEBUFFER:
+ if (!IsChromiumFramebufferMultisampleAvailable()) {
+ SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
+ return false;
+ }
+ if (bound_framebuffer_ != framebuffer) {
+ bound_framebuffer_ = framebuffer;
+ changed = true;
+ }
+ break;
+ default:
+ SetGLErrorInvalidEnum("glBindFramebuffer", target, "target");
+ return false;
+ }
+ GetIdHandler(id_namespaces::kFramebuffers)->MarkAsUsedForBind(framebuffer);
+ return changed;
+}
+
+bool GLES2Implementation::BindRenderbufferHelper(
+ GLenum target, GLuint renderbuffer) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ switch (target) {
+ case GL_RENDERBUFFER:
+ if (bound_renderbuffer_ != renderbuffer) {
+ bound_renderbuffer_ = renderbuffer;
+ changed = true;
+ }
+ break;
+ default:
+ changed = true;
+ break;
+ }
+ // TODO(gman): There's a bug here. If the target is invalid the ID will not
+ // be used even though it's marked as used here.
+ GetIdHandler(id_namespaces::kRenderbuffers)->MarkAsUsedForBind(renderbuffer);
+ return changed;
+}
+
+bool GLES2Implementation::BindTextureHelper(GLenum target, GLuint texture) {
+ // TODO(gman): See note #1 above.
+ // TODO(gman): Change this to false once we figure out why it's failing
+ // on daisy.
+ bool changed = true;
+ TextureUnit& unit = texture_units_[active_texture_unit_];
+ switch (target) {
+ case GL_TEXTURE_2D:
+ if (unit.bound_texture_2d != texture) {
+ unit.bound_texture_2d = texture;
+ changed = true;
+ }
+ break;
+ case GL_TEXTURE_CUBE_MAP:
+ if (unit.bound_texture_cube_map != texture) {
+ unit.bound_texture_cube_map = texture;
+ changed = true;
+ }
+ break;
+ case GL_TEXTURE_EXTERNAL_OES:
+ if (unit.bound_texture_external_oes != texture) {
+ unit.bound_texture_external_oes = texture;
+ changed = true;
+ }
+ break;
+ default:
+ changed = true;
+ break;
+ }
+ // TODO(gman): There's a bug here. If the target is invalid the ID will not
+ // be used even though it's marked as used here.
+ GetIdHandler(id_namespaces::kTextures)->MarkAsUsedForBind(texture);
+ return changed;
+}
+
+bool GLES2Implementation::BindVertexArrayOESHelper(GLuint array) {
+ // TODO(gman): See note #1 above.
+ bool changed = false;
+ if (!vertex_array_object_manager_->BindVertexArray(array, &changed)) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glBindVertexArrayOES",
+ "id was not generated with glGenVertexArrayOES");
+ }
+ // Unlike other BindXXXHelpers we don't call MarkAsUsedForBind because,
+ // unlike other resources, VertexArrayObject ids must be generated by
+ // GenVertexArrays. Binding a random id will not generate a new object.
+ return changed;
+}
+
+bool GLES2Implementation::UseProgramHelper(GLuint program) {
+ bool changed = false;
+ if (current_program_ != program) {
+ current_program_ = program;
+ changed = true;
+ }
+ return changed;
+}
+
+bool GLES2Implementation::IsBufferReservedId(GLuint id) {
+ return vertex_array_object_manager_->IsReservedId(id);
+}
+
+void GLES2Implementation::DeleteBuffersHelper(
+ GLsizei n, const GLuint* buffers) {
+ if (!GetIdHandler(id_namespaces::kBuffers)->FreeIds(
+ this, n, buffers, &GLES2Implementation::DeleteBuffersStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteBuffers", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (buffers[ii] == bound_array_buffer_id_) {
+ bound_array_buffer_id_ = 0;
+ }
+ vertex_array_object_manager_->UnbindBuffer(buffers[ii]);
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffers[ii]);
+ if (buffer)
+ RemoveTransferBuffer(buffer);
+
+ if (buffers[ii] == bound_pixel_unpack_transfer_buffer_id_) {
+ bound_pixel_unpack_transfer_buffer_id_ = 0;
+ }
+ }
+}
+
+void GLES2Implementation::DeleteBuffersStub(
+ GLsizei n, const GLuint* buffers) {
+ helper_->DeleteBuffersImmediate(n, buffers);
+}
+
+void GLES2Implementation::DeleteFramebuffersHelper(
+ GLsizei n, const GLuint* framebuffers) {
+ if (!GetIdHandler(id_namespaces::kFramebuffers)->FreeIds(
+ this, n, framebuffers, &GLES2Implementation::DeleteFramebuffersStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteFramebuffers", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (framebuffers[ii] == bound_framebuffer_) {
+ bound_framebuffer_ = 0;
+ }
+ if (framebuffers[ii] == bound_read_framebuffer_) {
+ bound_read_framebuffer_ = 0;
+ }
+ }
+}
+
+void GLES2Implementation::DeleteFramebuffersStub(
+ GLsizei n, const GLuint* framebuffers) {
+ helper_->DeleteFramebuffersImmediate(n, framebuffers);
+}
+
+void GLES2Implementation::DeleteRenderbuffersHelper(
+ GLsizei n, const GLuint* renderbuffers) {
+ if (!GetIdHandler(id_namespaces::kRenderbuffers)->FreeIds(
+ this, n, renderbuffers, &GLES2Implementation::DeleteRenderbuffersStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteRenderbuffers", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (renderbuffers[ii] == bound_renderbuffer_) {
+ bound_renderbuffer_ = 0;
+ }
+ }
+}
+
+void GLES2Implementation::DeleteRenderbuffersStub(
+ GLsizei n, const GLuint* renderbuffers) {
+ helper_->DeleteRenderbuffersImmediate(n, renderbuffers);
+}
+
+void GLES2Implementation::DeleteTexturesHelper(
+ GLsizei n, const GLuint* textures) {
+ if (!GetIdHandler(id_namespaces::kTextures)->FreeIds(
+ this, n, textures, &GLES2Implementation::DeleteTexturesStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteTextures", "id not created by this context.");
+ return;
+ }
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ for (GLint tt = 0;
+ tt < static_state_.int_state.max_combined_texture_image_units;
+ ++tt) {
+ TextureUnit& unit = texture_units_[tt];
+ if (textures[ii] == unit.bound_texture_2d) {
+ unit.bound_texture_2d = 0;
+ }
+ if (textures[ii] == unit.bound_texture_cube_map) {
+ unit.bound_texture_cube_map = 0;
+ }
+ if (textures[ii] == unit.bound_texture_external_oes) {
+ unit.bound_texture_external_oes = 0;
+ }
+ }
+ }
+}
+
+void GLES2Implementation::DeleteVertexArraysOESHelper(
+ GLsizei n, const GLuint* arrays) {
+ vertex_array_object_manager_->DeleteVertexArrays(n, arrays);
+ if (!GetIdHandler(id_namespaces::kVertexArrays)->FreeIds(
+ this, n, arrays, &GLES2Implementation::DeleteVertexArraysOESStub)) {
+ SetGLError(
+ GL_INVALID_VALUE,
+ "glDeleteVertexArraysOES", "id not created by this context.");
+ return;
+ }
+}
+
+void GLES2Implementation::DeleteVertexArraysOESStub(
+ GLsizei n, const GLuint* arrays) {
+ helper_->DeleteVertexArraysOESImmediate(n, arrays);
+}
+
+void GLES2Implementation::DeleteTexturesStub(
+ GLsizei n, const GLuint* textures) {
+ helper_->DeleteTexturesImmediate(n, textures);
+}
+
+void GLES2Implementation::DisableVertexAttribArray(GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glDisableVertexAttribArray(" << index << ")");
+ vertex_array_object_manager_->SetAttribEnable(index, false);
+ helper_->DisableVertexAttribArray(index);
+ CheckGLError();
+}
+
+void GLES2Implementation::EnableVertexAttribArray(GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnableVertexAttribArray("
+ << index << ")");
+ vertex_array_object_manager_->SetAttribEnable(index, true);
+ helper_->EnableVertexAttribArray(index);
+ CheckGLError();
+}
+
+void GLES2Implementation::DrawArrays(GLenum mode, GLint first, GLsizei count) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawArrays("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << first << ", " << count << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArrays", "count < 0");
+ return;
+ }
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers(
+ "glDrawArrays", this, helper_, first + count, 0, &simulated)) {
+ return;
+ }
+ helper_->DrawArrays(mode, first, count);
+ RestoreArrayBuffer(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetVertexAttribfv(
+ GLuint index, GLenum pname, GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribfv("
+ << index << ", "
+ << GLES2Util::GetStringVertexAttribute(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ uint32 value = 0;
+ if (vertex_array_object_manager_->GetVertexAttrib(index, pname, &value)) {
+ *params = static_cast<float>(value);
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetVertexAttribfv");
+ typedef cmds::GetVertexAttribfv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetVertexAttribfv(
+ index, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GetVertexAttribiv(
+ GLuint index, GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetVertexAttribiv("
+ << index << ", "
+ << GLES2Util::GetStringVertexAttribute(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ uint32 value = 0;
+ if (vertex_array_object_manager_->GetVertexAttrib(index, pname, &value)) {
+ *params = value;
+ return;
+ }
+ TRACE_EVENT0("gpu", "GLES2::GetVertexAttribiv");
+ typedef cmds::GetVertexAttribiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetVertexAttribiv(
+ index, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32 i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::Swap() {
+ SwapBuffers();
+}
+
+void GLES2Implementation::PartialSwapBuffers(const gfx::Rect& sub_buffer) {
+ PostSubBufferCHROMIUM(
+ sub_buffer.x(), sub_buffer.y(), sub_buffer.width(), sub_buffer.height());
+}
+
+static GLenum GetGLESOverlayTransform(gfx::OverlayTransform plane_transform) {
+ switch (plane_transform) {
+ case gfx::OVERLAY_TRANSFORM_INVALID:
+ break;
+ case gfx::OVERLAY_TRANSFORM_NONE:
+ return GL_OVERLAY_TRANSFORM_NONE_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_FLIP_HORIZONTAL:
+ return GL_OVERLAY_TRANSFORM_FLIP_HORIZONTAL_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_FLIP_VERTICAL:
+ return GL_OVERLAY_TRANSFORM_FLIP_VERTICAL_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_ROTATE_90:
+ return GL_OVERLAY_TRANSFORM_ROTATE_90_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_ROTATE_180:
+ return GL_OVERLAY_TRANSFORM_ROTATE_180_CHROMIUM;
+ case gfx::OVERLAY_TRANSFORM_ROTATE_270:
+ return GL_OVERLAY_TRANSFORM_ROTATE_270_CHROMIUM;
+ }
+ NOTREACHED();
+ return GL_OVERLAY_TRANSFORM_NONE_CHROMIUM;
+}
+
+void GLES2Implementation::ScheduleOverlayPlane(
+ int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect) {
+ ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ GetGLESOverlayTransform(plane_transform),
+ overlay_texture_id,
+ display_bounds.x(),
+ display_bounds.y(),
+ display_bounds.width(),
+ display_bounds.height(),
+ uv_rect.x(),
+ uv_rect.y(),
+ uv_rect.width(),
+ uv_rect.height());
+}
+
+GLboolean GLES2Implementation::EnableFeatureCHROMIUM(
+ const char* feature) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnableFeatureCHROMIUM("
+ << feature << ")");
+ TRACE_EVENT0("gpu", "GLES2::EnableFeatureCHROMIUM");
+ typedef cmds::EnableFeatureCHROMIUM::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return false;
+ }
+ *result = 0;
+ SetBucketAsCString(kResultBucketId, feature);
+ helper_->EnableFeatureCHROMIUM(
+ kResultBucketId, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ helper_->SetBucketSize(kResultBucketId, 0);
+ GPU_CLIENT_LOG(" returned " << GLES2Util::GetStringBool(*result));
+ return *result;
+}
+
+void* GLES2Implementation::MapBufferSubDataCHROMIUM(
+ GLuint target, GLintptr offset, GLsizeiptr size, GLenum access) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapBufferSubDataCHROMIUM("
+ << target << ", " << offset << ", " << size << ", "
+ << GLES2Util::GetStringEnum(access) << ")");
+ // NOTE: target is NOT checked because the service will check it
+ // and we don't know what targets are valid.
+ if (access != GL_WRITE_ONLY) {
+ SetGLErrorInvalidEnum(
+ "glMapBufferSubDataCHROMIUM", access, "access");
+ return NULL;
+ }
+ if (!ValidateSize("glMapBufferSubDataCHROMIUM", size) ||
+ !ValidateOffset("glMapBufferSubDataCHROMIUM", offset)) {
+ return NULL;
+ }
+
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+ if (!mem) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMapBufferSubDataCHROMIUM", "out of memory");
+ return NULL;
+ }
+
+ std::pair<MappedBufferMap::iterator, bool> result =
+ mapped_buffers_.insert(std::make_pair(
+ mem,
+ MappedBuffer(
+ access, shm_id, mem, shm_offset, target, offset, size)));
+ DCHECK(result.second);
+ GPU_CLIENT_LOG(" returned " << mem);
+ return mem;
+}
+
+void GLES2Implementation::UnmapBufferSubDataCHROMIUM(const void* mem) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glUnmapBufferSubDataCHROMIUM(" << mem << ")");
+ MappedBufferMap::iterator it = mapped_buffers_.find(mem);
+ if (it == mapped_buffers_.end()) {
+ SetGLError(
+ GL_INVALID_VALUE, "UnmapBufferSubDataCHROMIUM", "buffer not mapped");
+ return;
+ }
+ const MappedBuffer& mb = it->second;
+ helper_->BufferSubData(
+ mb.target, mb.offset, mb.size, mb.shm_id, mb.shm_offset);
+ mapped_memory_->FreePendingToken(mb.shm_memory, helper_->InsertToken());
+ mapped_buffers_.erase(it);
+ CheckGLError();
+}
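+
+ // Illustrative client-side usage of the map/unmap pair above (editorial
+ // sketch; |vertices| and the sizes are hypothetical, and a buffer is assumed
+ // to be bound and allocated with glBufferData already):
+ //   void* ptr = glMapBufferSubDataCHROMIUM(
+ //       GL_ARRAY_BUFFER, 0, 64, GL_WRITE_ONLY);
+ //   if (ptr) {
+ //     memcpy(ptr, vertices, 64);
+ //     // The BufferSubData command is only issued here, at unmap time.
+ //     glUnmapBufferSubDataCHROMIUM(ptr);
+ //   }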
+
+void* GLES2Implementation::MapTexSubImage2DCHROMIUM(
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapTexSubImage2DCHROMIUM("
+ << target << ", " << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << GLES2Util::GetStringEnum(access) << ")");
+ if (access != GL_WRITE_ONLY) {
+ SetGLErrorInvalidEnum(
+ "glMapTexSubImage2DCHROMIUM", access, "access");
+ return NULL;
+ }
+ // NOTE: target is NOT checked because the service will check it
+ // and we don't know what targets are valid.
+ if (level < 0 || xoffset < 0 || yoffset < 0 || width < 0 || height < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glMapTexSubImage2DCHROMIUM", "bad dimensions");
+ return NULL;
+ }
+ uint32 size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size, NULL, NULL)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glMapTexSubImage2DCHROMIUM", "image size too large");
+ return NULL;
+ }
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+ if (!mem) {
+ SetGLError(GL_OUT_OF_MEMORY, "glMapTexSubImage2DCHROMIUM", "out of memory");
+ return NULL;
+ }
+
+ std::pair<MappedTextureMap::iterator, bool> result =
+ mapped_textures_.insert(std::make_pair(
+ mem,
+ MappedTexture(
+ access, shm_id, mem, shm_offset,
+ target, level, xoffset, yoffset, width, height, format, type)));
+ DCHECK(result.second);
+ GPU_CLIENT_LOG(" returned " << mem);
+ return mem;
+}
+
+void GLES2Implementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glUnmapTexSubImage2DCHROMIUM(" << mem << ")");
+ MappedTextureMap::iterator it = mapped_textures_.find(mem);
+ if (it == mapped_textures_.end()) {
+ SetGLError(
+ GL_INVALID_VALUE, "UnmapTexSubImage2DCHROMIUM", "texture not mapped");
+ return;
+ }
+ const MappedTexture& mt = it->second;
+ helper_->TexSubImage2D(
+ mt.target, mt.level, mt.xoffset, mt.yoffset, mt.width, mt.height,
+ mt.format, mt.type, mt.shm_id, mt.shm_offset, GL_FALSE);
+ mapped_memory_->FreePendingToken(mt.shm_memory, helper_->InsertToken());
+ mapped_textures_.erase(it);
+ CheckGLError();
+}
+
+void GLES2Implementation::ResizeCHROMIUM(GLuint width, GLuint height,
+ float scale_factor) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glResizeCHROMIUM("
+ << width << ", " << height << ", " << scale_factor << ")");
+ helper_->ResizeCHROMIUM(width, height, scale_factor);
+ CheckGLError();
+}
+
+const GLchar* GLES2Implementation::GetRequestableExtensionsCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glGetRequestableExtensionsCHROMIUM()");
+ TRACE_EVENT0("gpu",
+ "GLES2Implementation::GetRequestableExtensionsCHROMIUM()");
+ const char* result = NULL;
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetRequestableExtensionsCHROMIUM(kResultBucketId);
+ std::string str;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ // The set of requestable extensions shrinks as we enable them. Because we
+ // don't know when the client will stop referring to a previously returned
+ // string (see GetString), we need to cache the unique results.
+ std::set<std::string>::const_iterator sit =
+ requestable_extensions_set_.find(str);
+ if (sit != requestable_extensions_set_.end()) {
+ result = sit->c_str();
+ } else {
+ std::pair<std::set<std::string>::const_iterator, bool> insert_result =
+ requestable_extensions_set_.insert(str);
+ DCHECK(insert_result.second);
+ result = insert_result.first->c_str();
+ }
+ }
+ GPU_CLIENT_LOG(" returned " << result);
+ return reinterpret_cast<const GLchar*>(result);
+}
+
+// TODO(gman): Remove this command. It's here for WebGL but is incompatible
+// with VirtualGL contexts.
+void GLES2Implementation::RequestExtensionCHROMIUM(const char* extension) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRequestExtensionCHROMIUM("
+ << extension << ")");
+ SetBucketAsCString(kResultBucketId, extension);
+ helper_->RequestExtensionCHROMIUM(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+
+ struct ExtensionCheck {
+ const char* extension;
+ ExtensionStatus* status;
+ };
+ const ExtensionCheck checks[] = {
+ {
+ "GL_ANGLE_pack_reverse_row_order",
+ &angle_pack_reverse_row_order_status_,
+ },
+ {
+ "GL_CHROMIUM_framebuffer_multisample",
+ &chromium_framebuffer_multisample_,
+ },
+ };
+ const size_t kNumChecks = sizeof(checks)/sizeof(checks[0]);
+ for (size_t ii = 0; ii < kNumChecks; ++ii) {
+ const ExtensionCheck& check = checks[ii];
+ if (*check.status == kUnavailableExtensionStatus &&
+ !strcmp(extension, check.extension)) {
+ *check.status = kUnknownExtensionStatus;
+ }
+ }
+}
+
+void GLES2Implementation::RateLimitOffscreenContextCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRateLimitOffscreenCHROMIUM()");
+ // Wait if this would add too many rate limit tokens.
+ if (rate_limit_tokens_.size() == kMaxSwapBuffers) {
+ helper_->WaitForToken(rate_limit_tokens_.front());
+ rate_limit_tokens_.pop();
+ }
+ rate_limit_tokens_.push(helper_->InsertToken());
+}
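+
+ // Editorial note: each call inserts one token, so at most kMaxSwapBuffers
+ // rate-limit tokens are outstanding at a time; once that many are pending,
+ // the next call blocks until the oldest token has passed, throttling an
+ // offscreen context to roughly the rate at which the service consumes work.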
+
+void GLES2Implementation::GetMultipleIntegervCHROMIUM(
+ const GLenum* pnames, GLuint count, GLint* results, GLsizeiptr size) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetMultipleIntegervCHROMIUM("
+ << static_cast<const void*>(pnames) << ", "
+ << count << ", " << results << ", "
+ << size << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLuint i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(
+ " " << i << ": " << GLES2Util::GetStringGLState(pnames[i]));
+ }
+ });
+ DCHECK(size >= 0 && FitInt32NonNegative<GLsizeiptr>(size));
+
+ GetMultipleIntegervState state(pnames, count, results, size);
+ if (!GetMultipleIntegervSetup(&state)) {
+ return;
+ }
+ state.buffer = transfer_buffer_->Alloc(state.transfer_buffer_size_needed);
+ if (!state.buffer) {
+ SetGLError(GL_OUT_OF_MEMORY, "glGetMultipleIntegervCHROMIUM",
+ "Transfer buffer allocation failed.");
+ return;
+ }
+ GetMultipleIntegervRequest(&state);
+ WaitForCmd();
+ GetMultipleIntegervOnCompleted(&state);
+
+ GPU_CLIENT_LOG(" returned");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int i = 0; i < state.num_results; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << (results[i]));
+ }
+ });
+
+ // TODO(gman): We should be able to free without a token.
+ transfer_buffer_->FreePendingToken(state.buffer, helper_->InsertToken());
+ CheckGLError();
+}
+
+bool GLES2Implementation::GetMultipleIntegervSetup(
+ GetMultipleIntegervState* state) {
+ state->num_results = 0;
+ for (GLuint ii = 0; ii < state->pnames_count; ++ii) {
+ int num = util_.GLGetNumValuesReturned(state->pnames[ii]);
+ if (!num) {
+ SetGLErrorInvalidEnum(
+ "glGetMultipleIntegervCHROMIUM", state->pnames[ii], "pname");
+ return false;
+ }
+ state->num_results += num;
+ }
+ if (static_cast<size_t>(state->results_size) !=
+ state->num_results * sizeof(GLint)) {
+ SetGLError(GL_INVALID_VALUE, "glGetMultipleIntegervCHROMIUM", "bad size");
+ return false;
+ }
+ for (int ii = 0; ii < state->num_results; ++ii) {
+ if (state->results[ii] != 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glGetMultipleIntegervCHROMIUM", "results not set to zero.");
+ return false;
+ }
+ }
+ state->transfer_buffer_size_needed =
+ state->pnames_count * sizeof(state->pnames[0]) +
+ state->num_results * sizeof(state->results[0]);
+ return true;
+}
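+
+ // Editorial example (assuming GL_ACTIVE_TEXTURE yields one value and
+ // GL_VIEWPORT four): for pnames == {GL_ACTIVE_TEXTURE, GL_VIEWPORT} the
+ // setup above counts 1 + 4 == 5 results, requires results_size to be
+ // 5 * sizeof(GLint) == 20 bytes, and asks for a transfer buffer of
+ // 2 * sizeof(GLenum) + 5 * sizeof(GLint) == 28 bytes.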
+
+void GLES2Implementation::GetMultipleIntegervRequest(
+ GetMultipleIntegervState* state) {
+ GLenum* pnames_buffer = static_cast<GLenum*>(state->buffer);
+ state->results_buffer = pnames_buffer + state->pnames_count;
+ memcpy(pnames_buffer, state->pnames, state->pnames_count * sizeof(GLenum));
+ memset(state->results_buffer, 0, state->num_results * sizeof(GLint));
+ helper_->GetMultipleIntegervCHROMIUM(
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(pnames_buffer),
+ state->pnames_count,
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(state->results_buffer),
+ state->results_size);
+}
+
+void GLES2Implementation::GetMultipleIntegervOnCompleted(
+ GetMultipleIntegervState* state) {
+ memcpy(state->results, state->results_buffer, state->results_size);
+}
+
+void GLES2Implementation::GetAllShaderPrecisionFormatsSetup(
+ GetAllShaderPrecisionFormatsState* state) {
+ state->transfer_buffer_size_needed =
+ state->precision_params_count *
+ sizeof(cmds::GetShaderPrecisionFormat::Result);
+}
+
+void GLES2Implementation::GetAllShaderPrecisionFormatsRequest(
+ GetAllShaderPrecisionFormatsState* state) {
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(state->results_buffer);
+
+ for (int i = 0; i < state->precision_params_count; i++) {
+ result->success = false;
+ helper_->GetShaderPrecisionFormat(state->precision_params[i][0],
+ state->precision_params[i][1],
+ transfer_buffer_->GetShmId(),
+ transfer_buffer_->GetOffset(result));
+ result++;
+ }
+}
+
+void GLES2Implementation::GetAllShaderPrecisionFormatsOnCompleted(
+ GetAllShaderPrecisionFormatsState* state) {
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+ Result* result = static_cast<Result*>(state->results_buffer);
+
+ for (int i = 0; i < state->precision_params_count; i++) {
+ if (result->success) {
+ const GLStaticState::ShaderPrecisionKey key(
+ state->precision_params[i][0], state->precision_params[i][1]);
+ static_state_.shader_precisions[key] = *result;
+ }
+ result++;
+ }
+}
+
+void GLES2Implementation::GetProgramInfoCHROMIUMHelper(
+ GLuint program, std::vector<int8>* result) {
+ DCHECK(result);
+ // Clear the bucket so if the command fails nothing will be in it.
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetProgramInfoCHROMIUM(program, kResultBucketId);
+ GetBucketContents(kResultBucketId, result);
+}
+
+void GLES2Implementation::GetProgramInfoCHROMIUM(
+ GLuint program, GLsizei bufsize, GLsizei* size, void* info) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ if (bufsize < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glProgramInfoCHROMIUM", "bufsize less than 0.");
+ return;
+ }
+ if (size == NULL) {
+ SetGLError(GL_INVALID_VALUE, "glProgramInfoCHROMIUM", "size is null.");
+ return;
+ }
+ // Make sure they've set size to 0, else the value will be undefined on a
+ // lost context.
+ DCHECK_EQ(0, *size);
+ std::vector<int8> result;
+ GetProgramInfoCHROMIUMHelper(program, &result);
+ if (result.empty()) {
+ return;
+ }
+ *size = result.size();
+ if (!info) {
+ return;
+ }
+ if (static_cast<size_t>(bufsize) < result.size()) {
+ SetGLError(GL_INVALID_OPERATION,
+ "glProgramInfoCHROMIUM", "bufsize is too small for result.");
+ return;
+ }
+ memcpy(info, &result[0], result.size());
+}
+
+GLuint GLES2Implementation::CreateStreamTextureCHROMIUM(GLuint texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] CreateStreamTextureCHROMIUM("
+ << texture << ")");
+ TRACE_EVENT0("gpu", "GLES2::CreateStreamTextureCHROMIUM");
+ helper_->CommandBufferHelper::Flush();
+ return gpu_control_->CreateStreamTexture(texture);
+}
+
+void GLES2Implementation::PostSubBufferCHROMIUM(
+ GLint x, GLint y, GLint width, GLint height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] PostSubBufferCHROMIUM("
+ << x << ", " << y << ", " << width << ", " << height << ")");
+ TRACE_EVENT2("gpu", "GLES2::PostSubBufferCHROMIUM",
+ "width", width, "height", height);
+
+ // Same flow control as GLES2Implementation::SwapBuffers (see comments there).
+ swap_buffers_tokens_.push(helper_->InsertToken());
+ helper_->PostSubBufferCHROMIUM(x, y, width, height);
+ helper_->CommandBufferHelper::Flush();
+ if (swap_buffers_tokens_.size() > kMaxSwapBuffers + 1) {
+ helper_->WaitForToken(swap_buffers_tokens_.front());
+ swap_buffers_tokens_.pop();
+ }
+}
+
+void GLES2Implementation::DeleteQueriesEXTHelper(
+ GLsizei n, const GLuint* queries) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ query_tracker_->RemoveQuery(queries[ii]);
+ query_id_allocator_->FreeID(queries[ii]);
+ }
+
+ helper_->DeleteQueriesEXTImmediate(n, queries);
+}
+
+GLboolean GLES2Implementation::IsQueryEXT(GLuint id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] IsQueryEXT(" << id << ")");
+
+  // TODO(gman): To be spec compliant, should IDs from other contexts that
+  // share resources return true here even though queries themselves can't be
+  // shared across contexts?
+ return query_tracker_->GetQuery(id) != NULL;
+}
+
+void GLES2Implementation::BeginQueryEXT(GLenum target, GLuint id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] BeginQueryEXT("
+ << GLES2Util::GetStringQueryTarget(target)
+ << ", " << id << ")");
+
+  // If a query is already outstanding on this target, generate
+  // GL_INVALID_OPERATION.
+ QueryMap::iterator it = current_queries_.find(target);
+ if (it != current_queries_.end()) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "query already in progress");
+ return;
+ }
+
+  // An id of 0 generates GL_INVALID_OPERATION.
+ if (id == 0) {
+ SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0");
+ return;
+ }
+
+  // An id that was not generated by glGenQueriesEXT generates
+  // GL_INVALID_OPERATION.
+ if (!query_id_allocator_->InUse(id)) {
+ SetGLError(GL_INVALID_OPERATION, "glBeginQueryEXT", "invalid id");
+ return;
+ }
+
+  // Create the query object lazily if the id does not have one yet.
+ QueryTracker::Query* query = query_tracker_->GetQuery(id);
+ if (!query) {
+ query = query_tracker_->CreateQuery(id, target);
+ if (!query) {
+ SetGLError(GL_OUT_OF_MEMORY,
+ "glBeginQueryEXT",
+ "transfer buffer allocation failed");
+ return;
+ }
+ } else if (query->target() != target) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glBeginQueryEXT", "target does not match");
+ return;
+ }
+
+ current_queries_[target] = query;
+
+ query->Begin(this);
+ CheckGLError();
+}
+
+void GLES2Implementation::EndQueryEXT(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] EndQueryEXT("
+ << GLES2Util::GetStringQueryTarget(target) << ")");
+ // Don't do anything if the context is lost.
+ if (helper_->IsContextLost()) {
+ return;
+ }
+
+ QueryMap::iterator it = current_queries_.find(target);
+ if (it == current_queries_.end()) {
+ SetGLError(GL_INVALID_OPERATION, "glEndQueryEXT", "no active query");
+ return;
+ }
+
+ QueryTracker::Query* query = it->second;
+ query->End(this);
+ current_queries_.erase(it);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetQueryivEXT(
+ GLenum target, GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] GetQueryivEXT("
+ << GLES2Util::GetStringQueryTarget(target) << ", "
+ << GLES2Util::GetStringQueryParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+
+ if (pname != GL_CURRENT_QUERY_EXT) {
+ SetGLErrorInvalidEnum("glGetQueryivEXT", pname, "pname");
+ return;
+ }
+ QueryMap::iterator it = current_queries_.find(target);
+ if (it != current_queries_.end()) {
+ QueryTracker::Query* query = it->second;
+ *params = query->id();
+ } else {
+ *params = 0;
+ }
+ GPU_CLIENT_LOG(" " << *params);
+ CheckGLError();
+}
+
+void GLES2Implementation::GetQueryObjectuivEXT(
+ GLuint id, GLenum pname, GLuint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] GetQueryivEXT(" << id << ", "
+ << GLES2Util::GetStringQueryObjectParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+
+ QueryTracker::Query* query = query_tracker_->GetQuery(id);
+ if (!query) {
+ SetGLError(GL_INVALID_OPERATION, "glQueryObjectuivEXT", "unknown query id");
+ return;
+ }
+
+ QueryMap::iterator it = current_queries_.find(query->target());
+ if (it != current_queries_.end()) {
+ SetGLError(
+ GL_INVALID_OPERATION,
+ "glQueryObjectuivEXT", "query active. Did you to call glEndQueryEXT?");
+ return;
+ }
+
+ if (query->NeverUsed()) {
+ SetGLError(
+ GL_INVALID_OPERATION,
+ "glQueryObjectuivEXT", "Never used. Did you call glBeginQueryEXT?");
+ return;
+ }
+
+ switch (pname) {
+ case GL_QUERY_RESULT_EXT:
+ if (!query->CheckResultsAvailable(helper_)) {
+ helper_->WaitForToken(query->token());
+ if (!query->CheckResultsAvailable(helper_)) {
+ FinishHelper();
+ CHECK(query->CheckResultsAvailable(helper_));
+ }
+ }
+ *params = query->GetResult();
+ break;
+ case GL_QUERY_RESULT_AVAILABLE_EXT:
+ *params = query->CheckResultsAvailable(helper_);
+ break;
+ default:
+ SetGLErrorInvalidEnum("glQueryObjectuivEXT", pname, "pname");
+ break;
+ }
+ GPU_CLIENT_LOG(" " << *params);
+ CheckGLError();
+}
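+
+// Typical client-side query usage exercising the entry points above (an
+// illustrative sketch; gl is a GLES2Implementation*, GenQueriesEXT comes from
+// the autogenerated part of this class, and GL_ANY_SAMPLES_PASSED_EXT is one
+// example target):
+//   GLuint query = 0;
+//   gl->GenQueriesEXT(1, &query);
+//   gl->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, query);
+//   // ...issue draw calls...
+//   gl->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+//   GLuint available = 0;
+//   gl->GetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+//   if (available) {
+//     GLuint result = 0;
+//     gl->GetQueryObjectuivEXT(query, GL_QUERY_RESULT_EXT, &result);
+//   }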
+
+void GLES2Implementation::DrawArraysInstancedANGLE(
+ GLenum mode, GLint first, GLsizei count, GLsizei primcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawArraysInstancedANGLE("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << first << ", " << count << ", " << primcount << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedANGLE", "count < 0");
+ return;
+ }
+ if (primcount < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawArraysInstancedANGLE", "primcount < 0");
+ return;
+ }
+ if (primcount == 0) {
+ return;
+ }
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedClientSideBuffers(
+ "glDrawArraysInstancedANGLE", this, helper_, first + count, primcount,
+ &simulated)) {
+ return;
+ }
+ helper_->DrawArraysInstancedANGLE(mode, first, count, primcount);
+ RestoreArrayBuffer(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::DrawElementsInstancedANGLE(
+ GLenum mode, GLsizei count, GLenum type, const void* indices,
+ GLsizei primcount) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawElementsInstancedANGLE("
+ << GLES2Util::GetStringDrawMode(mode) << ", "
+ << count << ", "
+ << GLES2Util::GetStringIndexType(type) << ", "
+ << static_cast<const void*>(indices) << ", "
+ << primcount << ")");
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glDrawElementsInstancedANGLE", "count less than 0.");
+ return;
+ }
+ if (count == 0) {
+ return;
+ }
+ if (primcount < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glDrawElementsInstancedANGLE", "primcount < 0");
+ return;
+ }
+ if (primcount == 0) {
+ return;
+ }
+ if (vertex_array_object_manager_->bound_element_array_buffer() != 0 &&
+ !ValidateOffset("glDrawElementsInstancedANGLE",
+ reinterpret_cast<GLintptr>(indices))) {
+ return;
+ }
+ GLuint offset = 0;
+ bool simulated = false;
+ if (!vertex_array_object_manager_->SetupSimulatedIndexAndClientSideBuffers(
+ "glDrawElementsInstancedANGLE", this, helper_, count, type, primcount,
+ indices, &offset, &simulated)) {
+ return;
+ }
+ helper_->DrawElementsInstancedANGLE(mode, count, type, offset, primcount);
+ RestoreElementAndArrayBuffers(simulated);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenMailboxCHROMIUM(
+ GLbyte* mailbox) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenMailboxCHROMIUM("
+ << static_cast<const void*>(mailbox) << ")");
+ TRACE_EVENT0("gpu", "GLES2::GenMailboxCHROMIUM");
+
+ gpu::Mailbox result = gpu::Mailbox::Generate();
+ memcpy(mailbox, result.name, sizeof(result.name));
+}
+
+void GLES2Implementation::ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glProduceTextureCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "ProduceTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ helper_->ProduceTextureCHROMIUMImmediate(target, data);
+ CheckGLError();
+}
+
+void GLES2Implementation::ProduceTextureDirectCHROMIUM(
+ GLuint texture, GLenum target, const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glProduceTextureDirectCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "ProduceTextureDirectCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ helper_->ProduceTextureDirectCHROMIUMImmediate(texture, target, data);
+ CheckGLError();
+}
+
+void GLES2Implementation::ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glConsumeTextureCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "ConsumeTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ helper_->ConsumeTextureCHROMIUMImmediate(target, data);
+ CheckGLError();
+}
+
+GLuint GLES2Implementation::CreateAndConsumeTextureCHROMIUM(
+ GLenum target, const GLbyte* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateAndConsumeTextureCHROMIUM("
+ << static_cast<const void*>(data) << ")");
+ const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(data);
+ DCHECK(mailbox.Verify()) << "CreateAndConsumeTextureCHROMIUM was passed a "
+ "mailbox that was not generated by "
+ "GenMailboxCHROMIUM.";
+ GLuint client_id;
+ GetIdHandler(id_namespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateAndConsumeTextureCHROMIUMImmediate(target,
+ client_id, data);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ CheckGLError();
+ return client_id;
+}
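+
+// Typical mailbox flow for sharing a texture between two contexts using the
+// functions above (an illustrative sketch; producer_gl and consumer_gl are
+// GLES2Implementation pointers for the two contexts, and some ordering such
+// as a sync point between produce and consume is assumed):
+//   GLbyte mailbox[GL_MAILBOX_SIZE_CHROMIUM];
+//   producer_gl->GenMailboxCHROMIUM(mailbox);
+//   producer_gl->ProduceTextureDirectCHROMIUM(texture_id, GL_TEXTURE_2D,
+//                                             mailbox);
+//   // ...order the consumer after the produce, e.g. with a sync point...
+//   GLuint consumed_texture =
+//       consumer_gl->CreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox);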
+
+void GLES2Implementation::PushGroupMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPushGroupMarkerEXT("
+ << length << ", " << marker << ")");
+ if (!marker) {
+ marker = "";
+ }
+ SetBucketAsString(
+ kResultBucketId,
+ (length ? std::string(marker, length) : std::string(marker)));
+ helper_->PushGroupMarkerEXT(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ debug_marker_manager_.PushGroup(
+ length ? std::string(marker, length) : std::string(marker));
+}
+
+void GLES2Implementation::InsertEventMarkerEXT(
+ GLsizei length, const GLchar* marker) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertEventMarkerEXT("
+ << length << ", " << marker << ")");
+ if (!marker) {
+ marker = "";
+ }
+ SetBucketAsString(
+ kResultBucketId,
+ (length ? std::string(marker, length) : std::string(marker)));
+ helper_->InsertEventMarkerEXT(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ debug_marker_manager_.SetMarker(
+ length ? std::string(marker, length) : std::string(marker));
+}
+
+void GLES2Implementation::PopGroupMarkerEXT() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPopGroupMarkerEXT()");
+ helper_->PopGroupMarkerEXT();
+ debug_marker_manager_.PopGroup();
+}
+
+void GLES2Implementation::TraceBeginCHROMIUM(const char* name) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTraceBeginCHROMIUM("
+ << name << ")");
+ if (current_trace_name_.get()) {
+ SetGLError(GL_INVALID_OPERATION, "glTraceBeginCHROMIUM",
+ "trace already running");
+ return;
+ }
+ TRACE_EVENT_COPY_ASYNC_BEGIN0("gpu", name, this);
+ SetBucketAsCString(kResultBucketId, name);
+ helper_->TraceBeginCHROMIUM(kResultBucketId);
+ helper_->SetBucketSize(kResultBucketId, 0);
+ current_trace_name_.reset(new std::string(name));
+}
+
+void GLES2Implementation::TraceEndCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTraceEndCHROMIUM(" << ")");
+ if (!current_trace_name_.get()) {
+ SetGLError(GL_INVALID_OPERATION, "glTraceEndCHROMIUM",
+ "missing begin trace");
+ return;
+ }
+ helper_->TraceEndCHROMIUM();
+ TRACE_EVENT_COPY_ASYNC_END0("gpu", current_trace_name_->c_str(), this);
+ current_trace_name_.reset();
+}
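+
+// Example of bracketing a group of commands for GPU tracing with the two
+// calls above (an illustrative sketch; gl is a GLES2Implementation*):
+//   gl->TraceBeginCHROMIUM("MyRenderPass");
+//   // ...issue the GL commands to be attributed to this trace...
+//   gl->TraceEndCHROMIUM();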
+
+void* GLES2Implementation::MapBufferCHROMIUM(GLuint target, GLenum access) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapBufferCHROMIUM("
+ << target << ", " << GLES2Util::GetStringEnum(access) << ")");
+ switch (target) {
+ case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
+ if (access != GL_READ_ONLY) {
+ SetGLError(GL_INVALID_ENUM, "glMapBufferCHROMIUM", "bad access mode");
+ return NULL;
+ }
+ break;
+ case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
+ if (access != GL_WRITE_ONLY) {
+ SetGLError(GL_INVALID_ENUM, "glMapBufferCHROMIUM", "bad access mode");
+ return NULL;
+ }
+ break;
+ default:
+ SetGLError(
+ GL_INVALID_ENUM, "glMapBufferCHROMIUM", "invalid target");
+ return NULL;
+ }
+ GLuint buffer_id;
+ GetBoundPixelTransferBuffer(target, "glMapBufferCHROMIUM", &buffer_id);
+ if (!buffer_id) {
+ return NULL;
+ }
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glMapBufferCHROMIUM", "invalid buffer");
+ return NULL;
+ }
+ if (buffer->mapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glMapBufferCHROMIUM", "already mapped");
+ return NULL;
+ }
+ // Here we wait for previous transfer operations to be finished.
+ // TODO(hubbe): AsyncTex(Sub)Image2dCHROMIUM does not currently work
+ // with this method of synchronization. Until this is fixed,
+ // MapBufferCHROMIUM will not block even if the transfer is not ready
+ // for these calls.
+ if (buffer->last_usage_token()) {
+ helper_->WaitForToken(buffer->last_usage_token());
+ buffer->set_last_usage_token(0);
+ }
+ buffer->set_mapped(true);
+
+ GPU_CLIENT_LOG(" returned " << buffer->address());
+ CheckGLError();
+ return buffer->address();
+}
+
+GLboolean GLES2Implementation::UnmapBufferCHROMIUM(GLuint target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glUnmapBufferCHROMIUM(" << target << ")");
+ GLuint buffer_id;
+  if (!GetBoundPixelTransferBuffer(
+          target, "glUnmapBufferCHROMIUM", &buffer_id)) {
+ SetGLError(GL_INVALID_ENUM, "glUnmapBufferCHROMIUM", "invalid target");
+ }
+ if (!buffer_id) {
+ return false;
+ }
+ BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
+ if (!buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapBufferCHROMIUM", "invalid buffer");
+ return false;
+ }
+ if (!buffer->mapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapBufferCHROMIUM", "not mapped");
+ return false;
+ }
+ buffer->set_mapped(false);
+ CheckGLError();
+ return true;
+}
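+
+// Typical pixel transfer buffer mapping flow using the two functions above
+// (an illustrative sketch; gl is a GLES2Implementation*, and BindBuffer and
+// BufferData come from the autogenerated part of this class):
+//   gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, buffer_id);
+//   gl->BufferData(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, size, NULL,
+//                  GL_STREAM_DRAW);
+//   void* mem = gl->MapBufferCHROMIUM(
+//       GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY);
+//   memcpy(mem, pixels, size);
+//   gl->UnmapBufferCHROMIUM(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM);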
+
+bool GLES2Implementation::EnsureAsyncUploadSync() {
+ if (async_upload_sync_)
+ return true;
+
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(sizeof(AsyncUploadSync),
+ &shm_id,
+ &shm_offset);
+ if (!mem)
+ return false;
+
+ async_upload_sync_shm_id_ = shm_id;
+ async_upload_sync_shm_offset_ = shm_offset;
+ async_upload_sync_ = static_cast<AsyncUploadSync*>(mem);
+ async_upload_sync_->Reset();
+
+ return true;
+}
+
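+// NextAsyncUploadToken() reserves 0 as the "never used" value, so on wrap the
+// sequence of issued tokens is ..., 0xFFFFFFFE, 0xFFFFFFFF, 1, 2, ... This
+// pairs with the most-significant-bit wrap check described for
+// HasAsyncUploadTokenPassed() in the header.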
+uint32 GLES2Implementation::NextAsyncUploadToken() {
+ async_upload_token_++;
+ if (async_upload_token_ == 0)
+ async_upload_token_++;
+ return async_upload_token_;
+}
+
+void GLES2Implementation::PollAsyncUploads() {
+ if (!async_upload_sync_)
+ return;
+
+ if (helper_->IsContextLost()) {
+ DetachedAsyncUploadMemoryList::iterator it =
+ detached_async_upload_memory_.begin();
+ while (it != detached_async_upload_memory_.end()) {
+ mapped_memory_->Free(it->first);
+ it = detached_async_upload_memory_.erase(it);
+ }
+ return;
+ }
+
+ DetachedAsyncUploadMemoryList::iterator it =
+ detached_async_upload_memory_.begin();
+ while (it != detached_async_upload_memory_.end()) {
+ if (HasAsyncUploadTokenPassed(it->second)) {
+ mapped_memory_->Free(it->first);
+ it = detached_async_upload_memory_.erase(it);
+ } else {
+ break;
+ }
+ }
+}
+
+void GLES2Implementation::FreeAllAsyncUploadBuffers() {
+  // Free all completed unmanaged async upload buffers.
+ PollAsyncUploads();
+
+  // Synchronously free the rest of the unmanaged async upload buffers.
+ if (!detached_async_upload_memory_.empty()) {
+ WaitAllAsyncTexImage2DCHROMIUM();
+ WaitForCmd();
+ PollAsyncUploads();
+ }
+}
+
+void GLES2Implementation::AsyncTexImage2DCHROMIUM(
+ GLenum target, GLint level, GLenum internalformat, GLsizei width,
+ GLsizei height, GLint border, GLenum format, GLenum type,
+ const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << width << ", " << height << ", " << border << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "dimension < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "border != 0");
+ return;
+ }
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(GL_INVALID_VALUE, "glTexImage2D", "image size too large");
+ return;
+ }
+
+  // If there's no data and no bound buffer, just issue the AsyncTexImage2D.
+ if (!pixels && !bound_pixel_unpack_transfer_buffer_id_) {
+ helper_->AsyncTexImage2DCHROMIUM(
+ target, level, internalformat, width, height, format, type,
+ 0, 0, 0, 0, 0);
+ return;
+ }
+
+ if (!EnsureAsyncUploadSync()) {
+ SetGLError(GL_OUT_OF_MEMORY, "glTexImage2D", "out of memory");
+ return;
+ }
+
+ // Otherwise, async uploads require a transfer buffer to be bound.
+ // TODO(hubbe): Make MapBufferCHROMIUM block if someone tries to re-use
+ // the buffer before the transfer is finished. (Currently such
+ // synchronization has to be handled manually.)
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glAsyncTexImage2DCHROMIUM", offset, size);
+ if (buffer && buffer->shm_id() != -1) {
+ uint32 async_token = NextAsyncUploadToken();
+ buffer->set_last_async_upload_token(async_token);
+ helper_->AsyncTexImage2DCHROMIUM(
+ target, level, internalformat, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset,
+ async_token,
+ async_upload_sync_shm_id_, async_upload_sync_shm_offset_);
+ }
+}
+
+void GLES2Implementation::AsyncTexSubImage2DCHROMIUM(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, const void* pixels) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glAsyncTexSubImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", "
+ << xoffset << ", " << yoffset << ", "
+ << width << ", " << height << ", "
+ << GLES2Util::GetStringTextureFormat(format) << ", "
+ << GLES2Util::GetStringPixelType(type) << ", "
+ << static_cast<const void*>(pixels) << ")");
+ if (level < 0 || height < 0 || width < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glAsyncTexSubImage2DCHROMIUM", "dimension < 0");
+ return;
+ }
+
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, unpack_alignment_, &size,
+ &unpadded_row_size, &padded_row_size)) {
+ SetGLError(
+ GL_INVALID_VALUE, "glAsyncTexSubImage2DCHROMIUM", "size to large");
+ return;
+ }
+
+ if (!EnsureAsyncUploadSync()) {
+ SetGLError(GL_OUT_OF_MEMORY, "glTexImage2D", "out of memory");
+ return;
+ }
+
+ // Async uploads require a transfer buffer to be bound.
+ // TODO(hubbe): Make MapBufferCHROMIUM block if someone tries to re-use
+ // the buffer before the transfer is finished. (Currently such
+ // synchronization has to be handled manually.)
+ GLuint offset = ToGLuint(pixels);
+ BufferTracker::Buffer* buffer = GetBoundPixelUnpackTransferBufferIfValid(
+ bound_pixel_unpack_transfer_buffer_id_,
+ "glAsyncTexSubImage2DCHROMIUM", offset, size);
+ if (buffer && buffer->shm_id() != -1) {
+ uint32 async_token = NextAsyncUploadToken();
+ buffer->set_last_async_upload_token(async_token);
+ helper_->AsyncTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type,
+ buffer->shm_id(), buffer->shm_offset() + offset,
+ async_token,
+ async_upload_sync_shm_id_, async_upload_sync_shm_offset_);
+ }
+}
+
+void GLES2Implementation::WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glWaitAsyncTexImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureTarget(target) << ")");
+ helper_->WaitAsyncTexImage2DCHROMIUM(target);
+ CheckGLError();
+}
+
+void GLES2Implementation::WaitAllAsyncTexImage2DCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glWaitAllAsyncTexImage2DCHROMIUM()");
+ helper_->WaitAllAsyncTexImage2DCHROMIUM();
+ CheckGLError();
+}
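+
+// Typical asynchronous upload flow built from the functions above (an
+// illustrative sketch; gl is a GLES2Implementation*, the pixel unpack
+// transfer buffer is filled as shown for MapBufferCHROMIUM, and the final
+// argument to AsyncTexImage2DCHROMIUM is a byte offset into that bound
+// buffer):
+//   gl->BindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, buffer_id);
+//   void* mem = gl->MapBufferCHROMIUM(
+//       GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY);
+//   memcpy(mem, pixels, size);
+//   gl->UnmapBufferCHROMIUM(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM);
+//   gl->AsyncTexImage2DCHROMIUM(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
+//                               GL_RGBA, GL_UNSIGNED_BYTE, 0 /* offset */);
+//   // Before the texture contents are needed (or the buffer is re-used):
+//   gl->WaitAsyncTexImage2DCHROMIUM(GL_TEXTURE_2D);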
+
+GLuint GLES2Implementation::InsertSyncPointCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertSyncPointCHROMIUM");
+ helper_->CommandBufferHelper::Flush();
+ return gpu_control_->InsertSyncPoint();
+}
+
+GLuint GLES2Implementation::InsertFutureSyncPointCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertFutureSyncPointCHROMIUM");
+ DCHECK(capabilities_.future_sync_points);
+ return gpu_control_->InsertFutureSyncPoint();
+}
+
+void GLES2Implementation::RetireSyncPointCHROMIUM(GLuint sync_point) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRetireSyncPointCHROMIUM("
+ << sync_point << ")");
+ DCHECK(capabilities_.future_sync_points);
+ helper_->CommandBufferHelper::Flush();
+ gpu_control_->RetireSyncPoint(sync_point);
+}
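+
+// Sync points order work between command buffers. A minimal sketch of the
+// usual pattern (illustrative only; the wait side uses the autogenerated
+// WaitSyncPointCHROMIUM entry point, which is assumed here):
+//   GLuint sync_point = producer_gl->InsertSyncPointCHROMIUM();
+//   consumer_gl->WaitSyncPointCHROMIUM(sync_point);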
+
+namespace {
+
+bool ValidImageFormat(GLenum internalformat) {
+ switch (internalformat) {
+ case GL_RGB:
+ case GL_RGBA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ValidImageUsage(GLenum usage) {
+ switch (usage) {
+ case GL_MAP_CHROMIUM:
+ case GL_SCANOUT_CHROMIUM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+
+GLuint GLES2Implementation::CreateImageCHROMIUMHelper(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ if (width <= 0) {
+ SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "width <= 0");
+ return 0;
+ }
+
+ if (height <= 0) {
+ SetGLError(GL_INVALID_VALUE, "glCreateImageCHROMIUM", "height <= 0");
+ return 0;
+ }
+ // Flush the command stream to ensure ordering in case the newly
+ // returned image_id has recently been in use with a different buffer.
+ helper_->CommandBufferHelper::Flush();
+
+ // Create new buffer.
+ GLuint buffer_id = gpu_memory_buffer_tracker_->CreateBuffer(
+ width, height, internalformat, usage);
+ if (buffer_id == 0) {
+ SetGLError(GL_OUT_OF_MEMORY, "glCreateImageCHROMIUM", "out of GPU memory.");
+ return 0;
+ }
+ return buffer_id;
+}
+
+GLuint GLES2Implementation::CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glCreateImageCHROMIUM(" << width << ", "
+ << height << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+      << GLES2Util::GetStringImageUsage(usage) << ")");
+ GLuint image_id =
+ CreateImageCHROMIUMHelper(width, height, internalformat, usage);
+ CheckGLError();
+ return image_id;
+}
+
+void GLES2Implementation::DestroyImageCHROMIUMHelper(GLuint image_id) {
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glDestroyImageCHROMIUM", "invalid image");
+ return;
+ }
+
+ // Flush the command stream to make sure all pending commands
+ // that may refer to the image_id are executed on the service side.
+ helper_->CommandBufferHelper::Flush();
+ gpu_memory_buffer_tracker_->RemoveBuffer(image_id);
+}
+
+void GLES2Implementation::DestroyImageCHROMIUM(GLuint image_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDestroyImageCHROMIUM("
+ << image_id << ")");
+ DestroyImageCHROMIUMHelper(image_id);
+ CheckGLError();
+}
+
+void GLES2Implementation::UnmapImageCHROMIUMHelper(GLuint image_id) {
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapImageCHROMIUM", "invalid image");
+ return;
+ }
+
+ if (!gpu_buffer->IsMapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glUnmapImageCHROMIUM", "not mapped");
+ return;
+ }
+ gpu_buffer->Unmap();
+}
+
+void GLES2Implementation::UnmapImageCHROMIUM(GLuint image_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUnmapImageCHROMIUM("
+ << image_id << ")");
+
+ UnmapImageCHROMIUMHelper(image_id);
+ CheckGLError();
+}
+
+void* GLES2Implementation::MapImageCHROMIUMHelper(GLuint image_id) {
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glMapImageCHROMIUM", "invalid image");
+ return NULL;
+ }
+
+ if (gpu_buffer->IsMapped()) {
+ SetGLError(GL_INVALID_OPERATION, "glMapImageCHROMIUM", "already mapped");
+ return NULL;
+ }
+
+ return gpu_buffer->Map();
+}
+
+void* GLES2Implementation::MapImageCHROMIUM(GLuint image_id) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMapImageCHROMIUM(" << image_id
+ << ")");
+
+ void* mapped = MapImageCHROMIUMHelper(image_id);
+ CheckGLError();
+ return mapped;
+}
+
+void GLES2Implementation::GetImageParameterivCHROMIUMHelper(
+ GLuint image_id, GLenum pname, GLint* params) {
+ if (pname != GL_IMAGE_ROWBYTES_CHROMIUM) {
+ SetGLError(GL_INVALID_ENUM, "glGetImageParameterivCHROMIUM",
+ "invalid parameter");
+ return;
+ }
+
+ gfx::GpuMemoryBuffer* gpu_buffer = gpu_memory_buffer_tracker_->GetBuffer(
+ image_id);
+ if (!gpu_buffer) {
+ SetGLError(GL_INVALID_OPERATION, "glGetImageParameterivCHROMIUM",
+ "invalid image");
+ return;
+ }
+
+ if (!gpu_buffer->IsMapped()) {
+ SetGLError(
+ GL_INVALID_OPERATION, "glGetImageParameterivCHROMIUM", "not mapped");
+ return;
+ }
+
+ *params = gpu_buffer->GetStride();
+}
+
+void GLES2Implementation::GetImageParameterivCHROMIUM(
+ GLuint image_id, GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glImageParameterivCHROMIUM("
+ << image_id << ", "
+ << GLES2Util::GetStringBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ GetImageParameterivCHROMIUMHelper(image_id, pname, params);
+ CheckGLError();
+}
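+
+// Typical image lifecycle using the helpers above (an illustrative sketch;
+// gl is a GLES2Implementation*):
+//   GLuint image_id = gl->CreateImageCHROMIUM(width, height, GL_RGBA,
+//                                             GL_MAP_CHROMIUM);
+//   void* data = gl->MapImageCHROMIUM(image_id);
+//   GLint row_bytes = 0;
+//   gl->GetImageParameterivCHROMIUM(image_id, GL_IMAGE_ROWBYTES_CHROMIUM,
+//                                   &row_bytes);
+//   // ...write height rows of pixel data, advancing row_bytes per row...
+//   gl->UnmapImageCHROMIUM(image_id);
+//   // When the image is no longer referenced:
+//   gl->DestroyImageCHROMIUM(image_id);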
+
+GLuint GLES2Implementation::CreateGpuMemoryBufferImageCHROMIUMHelper(
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ if (width <= 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glCreateGpuMemoryBufferImageCHROMIUM", "width <= 0");
+ return 0;
+ }
+
+ if (height <= 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "height <= 0");
+ return 0;
+ }
+
+ if (!ValidImageFormat(internalformat)) {
+ SetGLError(GL_INVALID_VALUE,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "invalid format");
+ return 0;
+ }
+
+ if (!ValidImageUsage(usage)) {
+ SetGLError(GL_INVALID_VALUE,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "invalid usage");
+ return 0;
+ }
+
+ // Flush the command stream to ensure ordering in case the newly
+ // returned image_id has recently been in use with a different buffer.
+ helper_->CommandBufferHelper::Flush();
+
+ // Create new buffer.
+ GLuint buffer_id = gpu_memory_buffer_tracker_->CreateBuffer(
+ width,
+ height,
+ internalformat == GL_RGBA ? GL_RGBA8_OES : GL_RGB8_OES,
+ usage);
+ if (buffer_id == 0) {
+ SetGLError(GL_OUT_OF_MEMORY,
+ "glCreateGpuMemoryBufferImageCHROMIUM",
+ "out of GPU memory");
+ return 0;
+ }
+ return buffer_id;
+}
+
+GLuint GLES2Implementation::CreateGpuMemoryBufferImageCHROMIUM(
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glCreateGpuMemoryBufferImageCHROMIUM(" << width
+ << ", " << height << ", "
+ << GLES2Util::GetStringImageInternalFormat(internalformat)
+ << ", " << GLES2Util::GetStringImageUsage(usage) << ")");
+ GLuint image_id = CreateGpuMemoryBufferImageCHROMIUMHelper(
+ width, height, internalformat, usage);
+ CheckGLError();
+ return image_id;
+}
+
+bool GLES2Implementation::ValidateSize(const char* func, GLsizeiptr size) {
+ if (size < 0) {
+ SetGLError(GL_INVALID_VALUE, func, "size < 0");
+ return false;
+ }
+ if (!FitInt32NonNegative<GLsizeiptr>(size)) {
+ SetGLError(GL_INVALID_OPERATION, func, "size more than 32-bit");
+ return false;
+ }
+ return true;
+}
+
+bool GLES2Implementation::ValidateOffset(const char* func, GLintptr offset) {
+ if (offset < 0) {
+ SetGLError(GL_INVALID_VALUE, func, "offset < 0");
+ return false;
+ }
+ if (!FitInt32NonNegative<GLintptr>(offset)) {
+ SetGLError(GL_INVALID_OPERATION, func, "offset more than 32-bit");
+ return false;
+ }
+ return true;
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_implementation_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/gles2_implementation.h b/gpu/command_buffer/client/gles2_implementation.h
new file mode 100644
index 0000000..84cda7e
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation.h
@@ -0,0 +1,828 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_H_
+
+#include <GLES2/gl2.h>
+
+#include <list>
+#include <map>
+#include <queue>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "gpu/command_buffer/client/buffer_tracker.h"
+#include "gpu/command_buffer/client/client_context_state.h"
+#include "gpu/command_buffer/client/context_support.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_impl_export.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/client/query_tracker.h"
+#include "gpu/command_buffer/client/ref_counted.h"
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "gpu/command_buffer/client/share_group.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/debug_marker_manager.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+
+#if !defined(NDEBUG) && !defined(__native_client__) && !defined(GLES2_CONFORMANCE_TESTS) // NOLINT
+ #if defined(GLES2_INLINE_OPTIMIZATION)
+    // TODO(gman): Replace with macros that work with inline optimization.
+ #define GPU_CLIENT_SINGLE_THREAD_CHECK()
+ #define GPU_CLIENT_LOG(args)
+ #define GPU_CLIENT_LOG_CODE_BLOCK(code)
+ #define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
+ #else
+ #include "base/logging.h"
+ #define GPU_CLIENT_SINGLE_THREAD_CHECK() SingleThreadChecker checker(this);
+ #define GPU_CLIENT_LOG(args) DLOG_IF(INFO, debug_) << args;
+ #define GPU_CLIENT_LOG_CODE_BLOCK(code) code
+ #define GPU_CLIENT_DCHECK_CODE_BLOCK(code) code
+ #define GPU_CLIENT_DEBUG
+ #endif
+#else
+ #define GPU_CLIENT_SINGLE_THREAD_CHECK()
+ #define GPU_CLIENT_LOG(args)
+ #define GPU_CLIENT_LOG_CODE_BLOCK(code)
+ #define GPU_CLIENT_DCHECK_CODE_BLOCK(code)
+#endif
+
+#if defined(GPU_CLIENT_DEBUG)
+ // Set to 1 to have the client fail when a GL error is generated.
+ // This helps find bugs in the renderer since the debugger stops on the error.
+# if 0
+# define GL_CLIENT_FAIL_GL_ERRORS
+# endif
+#endif
+
+// Check that destination pointers point to initialized memory.
+// When the context is lost, calling a GL function has no effect, so if the
+// destination pointers point to uninitialized memory it can often lead to
+// crash bugs. eg.
+//
+// GLsizei len;
+// glGetShaderSource(shader, max_size, &len, buffer);
+// std::string src(buffer, buffer + len);  // len can be uninitialized here!!!
+//
+// Because this check is not part of official GL, it happens only in Chrome
+// code, not Pepper.
+//
+// If it was up to us we'd just always write to the destination, but the OpenGL
+// spec defines the behavior of OpenGL functions, not us. :-(
+#if defined(__native_client__) || defined(GLES2_CONFORMANCE_TESTS)
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v)
+ #define GPU_CLIENT_DCHECK(v)
+#elif defined(GPU_DCHECK)
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) GPU_DCHECK(v)
+ #define GPU_CLIENT_DCHECK(v) GPU_DCHECK(v)
+#elif defined(DCHECK)
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) DCHECK(v)
+ #define GPU_CLIENT_DCHECK(v) DCHECK(v)
+#else
+ #define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(v) ASSERT(v)
+ #define GPU_CLIENT_DCHECK(v) ASSERT(v)
+#endif
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(ptr && \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
+#define GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(type, ptr) \
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION_ASSERT(!ptr || \
+ (ptr[0] == static_cast<type>(0) || ptr[0] == static_cast<type>(-1)));
+
+struct GLUniformDefinitionCHROMIUM;
+
+namespace gpu {
+
+class GpuControl;
+class ScopedTransferBufferPtr;
+class TransferBufferInterface;
+
+namespace gles2 {
+
+class ImageFactory;
+class VertexArrayObjectManager;
+
+class GLES2ImplementationErrorMessageCallback {
+ public:
+ virtual ~GLES2ImplementationErrorMessageCallback() { }
+ virtual void OnErrorMessage(const char* msg, int id) = 0;
+};
+
+// This class emulates GLES2 over command buffers. It can be used by a client
+// program so that the program does not need to deal with shared memory and
+// command buffer management. See gl2_lib.h. Note that there is a performance
+// gain to be had by using command buffers directly through GLES2CmdHelper,
+// but that entails changing your code to deal with shared memory and
+// synchronization issues.
+class GLES2_IMPL_EXPORT GLES2Implementation
+ : NON_EXPORTED_BASE(public GLES2Interface),
+ NON_EXPORTED_BASE(public ContextSupport) {
+ public:
+ enum MappedMemoryLimit {
+ kNoLimit = MappedMemoryManager::kNoLimit,
+ };
+
+ // Stores GL state that never changes.
+ struct GLES2_IMPL_EXPORT GLStaticState {
+ GLStaticState();
+ ~GLStaticState();
+
+ struct GLES2_IMPL_EXPORT IntState {
+ IntState();
+ GLint max_combined_texture_image_units;
+ GLint max_cube_map_texture_size;
+ GLint max_fragment_uniform_vectors;
+ GLint max_renderbuffer_size;
+ GLint max_texture_image_units;
+ GLint max_texture_size;
+ GLint max_varying_vectors;
+ GLint max_vertex_attribs;
+ GLint max_vertex_texture_image_units;
+ GLint max_vertex_uniform_vectors;
+ GLint num_compressed_texture_formats;
+ GLint num_shader_binary_formats;
+ GLint bind_generates_resource_chromium;
+ };
+ IntState int_state;
+
+ typedef std::pair<GLenum, GLenum> ShaderPrecisionKey;
+ typedef std::map<ShaderPrecisionKey,
+ cmds::GetShaderPrecisionFormat::Result>
+ ShaderPrecisionMap;
+ ShaderPrecisionMap shader_precisions;
+ };
+
+  // The maximum result size from simple GL get commands.
+ static const size_t kMaxSizeOfSimpleResult = 16 * sizeof(uint32); // NOLINT.
+
+  // Used for testing only. If more things are reserved, add them here.
+ static const unsigned int kStartingOffset = kMaxSizeOfSimpleResult;
+
+ // Size in bytes to issue async flush for transfer buffer.
+ static const unsigned int kSizeToFlush = 256 * 1024;
+
+ // The bucket used for results. Public for testing only.
+ static const uint32 kResultBucketId = 1;
+
+ // Alignment of allocations.
+ static const unsigned int kAlignment = 4;
+
+ // GL names for the buffers used to emulate client side buffers.
+ static const GLuint kClientSideArrayId = 0xFEDCBA98u;
+ static const GLuint kClientSideElementArrayId = 0xFEDCBA99u;
+
+ // Number of swap buffers allowed before waiting.
+ static const size_t kMaxSwapBuffers = 2;
+
+ GLES2Implementation(GLES2CmdHelper* helper,
+ ShareGroup* share_group,
+ TransferBufferInterface* transfer_buffer,
+ bool bind_generates_resource,
+ bool lose_context_when_out_of_memory,
+ GpuControl* gpu_control);
+
+ virtual ~GLES2Implementation();
+
+ bool Initialize(
+ unsigned int starting_transfer_buffer_size,
+ unsigned int min_transfer_buffer_size,
+ unsigned int max_transfer_buffer_size,
+ unsigned int mapped_memory_limit);
+
+ // The GLES2CmdHelper being used by this GLES2Implementation. You can use
+ // this to issue cmds at a lower level for certain kinds of optimization.
+ GLES2CmdHelper* helper() const;
+
+ // Gets client side generated errors.
+ GLenum GetClientSideGLError();
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_implementation_autogen.h"
+
+ virtual void DisableVertexAttribArray(GLuint index) OVERRIDE;
+ virtual void EnableVertexAttribArray(GLuint index) OVERRIDE;
+ virtual void GetVertexAttribfv(
+ GLuint index, GLenum pname, GLfloat* params) OVERRIDE;
+ virtual void GetVertexAttribiv(
+ GLuint index, GLenum pname, GLint* params) OVERRIDE;
+
+ // ContextSupport implementation.
+ virtual void Swap() OVERRIDE;
+ virtual void PartialSwapBuffers(const gfx::Rect& sub_buffer) OVERRIDE;
+ virtual void ScheduleOverlayPlane(int plane_z_order,
+ gfx::OverlayTransform plane_transform,
+ unsigned overlay_texture_id,
+ const gfx::Rect& display_bounds,
+ const gfx::RectF& uv_rect) OVERRIDE;
+ virtual GLuint InsertFutureSyncPointCHROMIUM() OVERRIDE;
+ virtual void RetireSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+
+ void GetProgramInfoCHROMIUMHelper(GLuint program, std::vector<int8>* result);
+ GLint GetAttribLocationHelper(GLuint program, const char* name);
+ GLint GetUniformLocationHelper(GLuint program, const char* name);
+ bool GetActiveAttribHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name);
+ bool GetActiveUniformHelper(
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name);
+
+ void FreeUnusedSharedMemory();
+ void FreeEverything();
+
+ // ContextSupport implementation.
+ virtual void SignalSyncPoint(uint32 sync_point,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SignalQuery(uint32 query,
+ const base::Closure& callback) OVERRIDE;
+ virtual void SetSurfaceVisible(bool visible) OVERRIDE;
+
+ void SetErrorMessageCallback(
+ GLES2ImplementationErrorMessageCallback* callback) {
+ error_message_callback_ = callback;
+ }
+
+ ShareGroup* share_group() const {
+ return share_group_.get();
+ }
+
+ const Capabilities& capabilities() const {
+ return capabilities_;
+ }
+
+ GpuControl* gpu_control() {
+ return gpu_control_;
+ }
+
+ ShareGroupContextData* share_group_context_data() {
+ return &share_group_context_data_;
+ }
+
+ private:
+ friend class GLES2ImplementationTest;
+ friend class VertexArrayObjectManager;
+
+ // Used to track whether an extension is available
+ enum ExtensionStatus {
+ kAvailableExtensionStatus,
+ kUnavailableExtensionStatus,
+ kUnknownExtensionStatus
+ };
+
+ // Base class for mapped resources.
+ struct MappedResource {
+ MappedResource(GLenum _access, int _shm_id, void* mem, unsigned int offset)
+ : access(_access),
+ shm_id(_shm_id),
+ shm_memory(mem),
+ shm_offset(offset) {
+ }
+
+ // access mode. Currently only GL_WRITE_ONLY is valid
+ GLenum access;
+
+ // Shared memory ID for buffer.
+ int shm_id;
+
+ // Address of shared memory
+ void* shm_memory;
+
+ // Offset of shared memory
+ unsigned int shm_offset;
+ };
+
+ // Used to track mapped textures.
+ struct MappedTexture : public MappedResource {
+ MappedTexture(
+ GLenum access,
+ int shm_id,
+ void* shm_mem,
+ unsigned int shm_offset,
+ GLenum _target,
+ GLint _level,
+ GLint _xoffset,
+ GLint _yoffset,
+ GLsizei _width,
+ GLsizei _height,
+ GLenum _format,
+ GLenum _type)
+ : MappedResource(access, shm_id, shm_mem, shm_offset),
+ target(_target),
+ level(_level),
+ xoffset(_xoffset),
+ yoffset(_yoffset),
+ width(_width),
+ height(_height),
+ format(_format),
+ type(_type) {
+ }
+
+ // These match the arguments to TexSubImage2D.
+ GLenum target;
+ GLint level;
+ GLint xoffset;
+ GLint yoffset;
+ GLsizei width;
+ GLsizei height;
+ GLenum format;
+ GLenum type;
+ };
+
+ // Used to track mapped buffers.
+ struct MappedBuffer : public MappedResource {
+ MappedBuffer(
+ GLenum access,
+ int shm_id,
+ void* shm_mem,
+ unsigned int shm_offset,
+ GLenum _target,
+ GLintptr _offset,
+ GLsizeiptr _size)
+ : MappedResource(access, shm_id, shm_mem, shm_offset),
+ target(_target),
+ offset(_offset),
+ size(_size) {
+ }
+
+ // These match the arguments to BufferSubData.
+ GLenum target;
+ GLintptr offset;
+ GLsizeiptr size;
+ };
+
+ struct TextureUnit {
+ TextureUnit()
+ : bound_texture_2d(0),
+ bound_texture_cube_map(0),
+ bound_texture_external_oes(0) {}
+
+ // texture currently bound to this unit's GL_TEXTURE_2D with glBindTexture
+ GLuint bound_texture_2d;
+
+ // texture currently bound to this unit's GL_TEXTURE_CUBE_MAP with
+ // glBindTexture
+ GLuint bound_texture_cube_map;
+
+ // texture currently bound to this unit's GL_TEXTURE_EXTERNAL_OES with
+ // glBindTexture
+ GLuint bound_texture_external_oes;
+ };
+
+ // Checks for single threaded access.
+ class SingleThreadChecker {
+ public:
+ explicit SingleThreadChecker(GLES2Implementation* gles2_implementation);
+ ~SingleThreadChecker();
+
+ private:
+ GLES2Implementation* gles2_implementation_;
+ };
+
+ // Gets the value of the result.
+ template <typename T>
+ T GetResultAs() {
+ return static_cast<T>(GetResultBuffer());
+ }
+
+ void* GetResultBuffer();
+ int32 GetResultShmId();
+ uint32 GetResultShmOffset();
+
+ bool QueryAndCacheStaticState();
+
+  // Helpers used to batch synchronous GetIntegerv calls with other
+ // synchronous calls.
+ struct GetMultipleIntegervState {
+ GetMultipleIntegervState(const GLenum* pnames, GLuint pnames_count,
+ GLint* results, GLsizeiptr results_size)
+ : pnames(pnames),
+ pnames_count(pnames_count),
+ results(results),
+ results_size(results_size)
+ { }
+ // inputs
+ const GLenum* pnames;
+ GLuint pnames_count;
+ // outputs
+ GLint* results;
+ GLsizeiptr results_size;
+ // transfer buffer
+ int num_results;
+ int transfer_buffer_size_needed;
+ void* buffer;
+ void* results_buffer;
+ };
+ bool GetMultipleIntegervSetup(
+ GetMultipleIntegervState* state);
+ void GetMultipleIntegervRequest(
+ GetMultipleIntegervState* state);
+ void GetMultipleIntegervOnCompleted(
+ GetMultipleIntegervState* state);
+
+ // Helpers used to batch synchronous GetShaderPrecision calls with other
+ // synchronous calls.
+ struct GetAllShaderPrecisionFormatsState {
+ GetAllShaderPrecisionFormatsState(
+ const GLenum (*precision_params)[2],
+ int precision_params_count)
+ : precision_params(precision_params),
+ precision_params_count(precision_params_count)
+ { }
+ const GLenum (*precision_params)[2];
+ int precision_params_count;
+ int transfer_buffer_size_needed;
+ void* results_buffer;
+ };
+ void GetAllShaderPrecisionFormatsSetup(
+ GetAllShaderPrecisionFormatsState* state);
+ void GetAllShaderPrecisionFormatsRequest(
+ GetAllShaderPrecisionFormatsState* state);
+ void GetAllShaderPrecisionFormatsOnCompleted(
+ GetAllShaderPrecisionFormatsState* state);
+
+ // Lazily determines if GL_ANGLE_pack_reverse_row_order is available
+ bool IsAnglePackReverseRowOrderAvailable();
+ bool IsChromiumFramebufferMultisampleAvailable();
+
+ bool IsExtensionAvailableHelper(
+ const char* extension, ExtensionStatus* status);
+
+ // Gets the GLError through our wrapper.
+ GLenum GetGLError();
+
+ // Sets our wrapper for the GLError.
+ void SetGLError(GLenum error, const char* function_name, const char* msg);
+ void SetGLErrorInvalidEnum(
+ const char* function_name, GLenum value, const char* label);
+
+  // Returns the last error. Useful for debugging.
+ const std::string& GetLastError() {
+ return last_error_;
+ }
+
+ // Waits for all commands to execute.
+ void WaitForCmd();
+
+ // TODO(gman): These bucket functions really seem like they belong in
+ // CommandBufferHelper (or maybe BucketHelper?). Unfortunately they need
+ // a transfer buffer to function which is currently managed by this class.
+
+ // Gets the contents of a bucket.
+ bool GetBucketContents(uint32 bucket_id, std::vector<int8>* data);
+
+ // Sets the contents of a bucket.
+ void SetBucketContents(uint32 bucket_id, const void* data, size_t size);
+
+ // Sets the contents of a bucket as a string.
+ void SetBucketAsCString(uint32 bucket_id, const char* str);
+
+  // Gets the contents of a bucket as a string. Returns false if there is no
+  // string available, which is a separate case from the empty string.
+ bool GetBucketAsString(uint32 bucket_id, std::string* str);
+
+ // Sets the contents of a bucket as a string.
+ void SetBucketAsString(uint32 bucket_id, const std::string& str);
+
+ // Returns true if id is reserved.
+ bool IsBufferReservedId(GLuint id);
+ bool IsFramebufferReservedId(GLuint id) { return false; }
+ bool IsRenderbufferReservedId(GLuint id) { return false; }
+ bool IsTextureReservedId(GLuint id) { return false; }
+ bool IsVertexArrayReservedId(GLuint id) { return false; }
+ bool IsProgramReservedId(GLuint id) { return false; }
+
+  bool BindBufferHelper(GLenum target, GLuint buffer);
+  bool BindFramebufferHelper(GLenum target, GLuint framebuffer);
+  bool BindRenderbufferHelper(GLenum target, GLuint renderbuffer);
+ bool BindTextureHelper(GLenum target, GLuint texture);
+ bool BindVertexArrayOESHelper(GLuint array);
+ bool UseProgramHelper(GLuint program);
+
+ void GenBuffersHelper(GLsizei n, const GLuint* buffers);
+ void GenFramebuffersHelper(GLsizei n, const GLuint* framebuffers);
+ void GenRenderbuffersHelper(GLsizei n, const GLuint* renderbuffers);
+ void GenTexturesHelper(GLsizei n, const GLuint* textures);
+ void GenVertexArraysOESHelper(GLsizei n, const GLuint* arrays);
+ void GenQueriesEXTHelper(GLsizei n, const GLuint* queries);
+
+ void DeleteBuffersHelper(GLsizei n, const GLuint* buffers);
+ void DeleteFramebuffersHelper(GLsizei n, const GLuint* framebuffers);
+ void DeleteRenderbuffersHelper(GLsizei n, const GLuint* renderbuffers);
+ void DeleteTexturesHelper(GLsizei n, const GLuint* textures);
+ bool DeleteProgramHelper(GLuint program);
+ bool DeleteShaderHelper(GLuint shader);
+ void DeleteQueriesEXTHelper(GLsizei n, const GLuint* queries);
+ void DeleteVertexArraysOESHelper(GLsizei n, const GLuint* arrays);
+
+ void DeleteBuffersStub(GLsizei n, const GLuint* buffers);
+ void DeleteFramebuffersStub(GLsizei n, const GLuint* framebuffers);
+ void DeleteRenderbuffersStub(GLsizei n, const GLuint* renderbuffers);
+ void DeleteTexturesStub(GLsizei n, const GLuint* textures);
+ void DeleteProgramStub(GLsizei n, const GLuint* programs);
+ void DeleteShaderStub(GLsizei n, const GLuint* shaders);
+ void DeleteVertexArraysOESStub(GLsizei n, const GLuint* arrays);
+
+ void BufferDataHelper(
+ GLenum target, GLsizeiptr size, const void* data, GLenum usage);
+ void BufferSubDataHelper(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data);
+ void BufferSubDataHelperImpl(
+ GLenum target, GLintptr offset, GLsizeiptr size, const void* data,
+ ScopedTransferBufferPtr* buffer);
+
+ GLuint CreateImageCHROMIUMHelper(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage);
+ void DestroyImageCHROMIUMHelper(GLuint image_id);
+ void* MapImageCHROMIUMHelper(GLuint image_id);
+ void UnmapImageCHROMIUMHelper(GLuint image_id);
+ void GetImageParameterivCHROMIUMHelper(
+ GLuint image_id, GLenum pname, GLint* params);
+ GLuint CreateGpuMemoryBufferImageCHROMIUMHelper(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage);
+
+ // Helper for GetVertexAttrib
+ bool GetVertexAttribHelper(GLuint index, GLenum pname, uint32* param);
+
+ GLuint GetMaxValueInBufferCHROMIUMHelper(
+ GLuint buffer_id, GLsizei count, GLenum type, GLuint offset);
+
+ void RestoreElementAndArrayBuffers(bool restore);
+  void RestoreArrayBuffer(bool restore);
+
+ // The pixels pointer should already account for unpack skip rows and skip
+ // pixels.
+ void TexSubImage2DImpl(
+ GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format, GLenum type, uint32 unpadded_row_size,
+ const void* pixels, uint32 pixels_padded_row_size, GLboolean internal,
+ ScopedTransferBufferPtr* buffer, uint32 buffer_padded_row_size);
+
+ // Helpers for query functions.
+ bool GetHelper(GLenum pname, GLint* params);
+ bool GetBooleanvHelper(GLenum pname, GLboolean* params);
+ bool GetBufferParameterivHelper(GLenum target, GLenum pname, GLint* params);
+ bool GetFloatvHelper(GLenum pname, GLfloat* params);
+ bool GetFramebufferAttachmentParameterivHelper(
+ GLenum target, GLenum attachment, GLenum pname, GLint* params);
+ bool GetIntegervHelper(GLenum pname, GLint* params);
+ bool GetProgramivHelper(GLuint program, GLenum pname, GLint* params);
+ bool GetRenderbufferParameterivHelper(
+ GLenum target, GLenum pname, GLint* params);
+ bool GetShaderivHelper(GLuint shader, GLenum pname, GLint* params);
+ bool GetTexParameterfvHelper(GLenum target, GLenum pname, GLfloat* params);
+ bool GetTexParameterivHelper(GLenum target, GLenum pname, GLint* params);
+ const GLubyte* GetStringHelper(GLenum name);
+
+ bool IsExtensionAvailable(const char* ext);
+
+  // Caches certain capabilities state. Returns true if cached.
+ bool SetCapabilityState(GLenum cap, bool enabled);
+
+ IdHandlerInterface* GetIdHandler(int id_namespace) const;
+ // IdAllocators for objects that can't be shared among contexts.
+ // For now, used only for Queries. TODO(hj.r.chung) Should be added for
+ // Framebuffer and Vertex array objects.
+ IdAllocator* GetIdAllocator(int id_namespace) const;
+
+ void FinishHelper();
+
+ void RunIfContextNotLost(const base::Closure& callback);
+
+  // Validates that an offset is non-negative and fits into 32 bits.
+  // If not, generates an appropriate error and returns false.
+  bool ValidateOffset(const char* func, GLintptr offset);
+
+  // Validates that a size is non-negative and fits into 32 bits.
+  // If not, generates an appropriate error and returns false.
+  bool ValidateSize(const char* func, GLsizeiptr size);
+
+  // Removes the transfer buffer from the buffer tracker. For buffers used
+  // asynchronously the memory is freed if the upload has completed. For
+  // other buffers, the memory is either freed immediately or freed pending
+  // a token.
+ void RemoveTransferBuffer(BufferTracker::Buffer* buffer);
+
+  // Returns true if the async upload token has passed.
+  //
+  // NOTE: This detects wrapped async tokens by checking whether the most
+  // significant bit of the token to check is 1 while that of the last read
+  // token is 0, i.e. the uint32 wrapped.
+ bool HasAsyncUploadTokenPassed(uint32 token) const {
+ return async_upload_sync_->HasAsyncUploadTokenPassed(token);
+ }
+
+ // Get the next async upload token.
+ uint32 NextAsyncUploadToken();
+
+ // Ensure that the shared memory used for synchronizing async upload tokens
+ // has been mapped.
+ //
+ // Returns false on error, true on success.
+ bool EnsureAsyncUploadSync();
+
+  // Checks the last read asynchronous upload token and frees any unmanaged
+  // transfer buffer whose async token has passed.
+ void PollAsyncUploads();
+
+  // Frees every async upload buffer. If any async upload buffers are still
+  // in use, waits for them to finish before freeing.
+ void FreeAllAsyncUploadBuffers();
+
+ bool GetBoundPixelTransferBuffer(
+ GLenum target, const char* function_name, GLuint* buffer_id);
+ BufferTracker::Buffer* GetBoundPixelUnpackTransferBufferIfValid(
+ GLuint buffer_id,
+ const char* function_name, GLuint offset, GLsizei size);
+
+ const std::string& GetLogPrefix() const;
+
+#if defined(GL_CLIENT_FAIL_GL_ERRORS)
+ void CheckGLError();
+ void FailGLError(GLenum error);
+#else
+ void CheckGLError() { }
+ void FailGLError(GLenum /* error */) { }
+#endif
+
+ GLES2Util util_;
+ GLES2CmdHelper* helper_;
+ TransferBufferInterface* transfer_buffer_;
+ std::string last_error_;
+ DebugMarkerManager debug_marker_manager_;
+ std::string this_in_hex_;
+
+ std::queue<int32> swap_buffers_tokens_;
+ std::queue<int32> rate_limit_tokens_;
+
+ ExtensionStatus angle_pack_reverse_row_order_status_;
+ ExtensionStatus chromium_framebuffer_multisample_;
+
+ GLStaticState static_state_;
+ ClientContextState state_;
+
+ // pack alignment as last set by glPixelStorei
+ GLint pack_alignment_;
+
+ // unpack alignment as last set by glPixelStorei
+ GLint unpack_alignment_;
+
+  // unpack y-flip as last set by glPixelStorei
+ bool unpack_flip_y_;
+
+ // unpack row length as last set by glPixelStorei
+ GLint unpack_row_length_;
+
+ // unpack skip rows as last set by glPixelStorei
+ GLint unpack_skip_rows_;
+
+ // unpack skip pixels as last set by glPixelStorei
+ GLint unpack_skip_pixels_;
+
+  // pack reverse row order as last set by glPixelStorei
+ bool pack_reverse_row_order_;
+
+ scoped_ptr<TextureUnit[]> texture_units_;
+
+ // 0 to gl_state_.max_combined_texture_image_units.
+ GLuint active_texture_unit_;
+
+ GLuint bound_framebuffer_;
+ GLuint bound_read_framebuffer_;
+ GLuint bound_renderbuffer_;
+
+ // The program in use by glUseProgram
+ GLuint current_program_;
+
+ // The currently bound array buffer.
+ GLuint bound_array_buffer_id_;
+
+ // The currently bound pixel transfer buffers.
+ GLuint bound_pixel_pack_transfer_buffer_id_;
+ GLuint bound_pixel_unpack_transfer_buffer_id_;
+
+ // The current asynchronous pixel buffer upload token.
+ uint32 async_upload_token_;
+
+ // The shared memory used for synchronizing asynchronous upload tokens.
+ AsyncUploadSync* async_upload_sync_;
+ int32 async_upload_sync_shm_id_;
+ unsigned int async_upload_sync_shm_offset_;
+
+ // Unmanaged pixel transfer buffer memory pending asynchronous upload token.
+ typedef std::list<std::pair<void*, uint32> > DetachedAsyncUploadMemoryList;
+ DetachedAsyncUploadMemoryList detached_async_upload_memory_;
+
+ // Client side management for vertex array objects. Needed to correctly
+ // track client side arrays.
+ scoped_ptr<VertexArrayObjectManager> vertex_array_object_manager_;
+
+ GLuint reserved_ids_[2];
+
+ // Current GL error bits.
+ uint32 error_bits_;
+
+ // Whether or not to print debugging info.
+ bool debug_;
+
+ // When true, the context is lost when a GL_OUT_OF_MEMORY error occurs.
+ bool lose_context_when_out_of_memory_;
+
+ // Used to check for single threaded access.
+ int use_count_;
+
+ // Map of GLenum to Strings for glGetString. We need to cache these because
+ // the pointer passed back to the client has to remain valid for eternity.
+ typedef std::map<uint32, std::set<std::string> > GLStringMap;
+ GLStringMap gl_strings_;
+
+ // Similar cache for glGetRequestableExtensionsCHROMIUM. We don't
+ // have an enum for this so handle it separately.
+ std::set<std::string> requestable_extensions_set_;
+
+ typedef std::map<const void*, MappedBuffer> MappedBufferMap;
+ MappedBufferMap mapped_buffers_;
+
+ typedef std::map<const void*, MappedTexture> MappedTextureMap;
+ MappedTextureMap mapped_textures_;
+
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+
+ scoped_refptr<ShareGroup> share_group_;
+ ShareGroupContextData share_group_context_data_;
+
+ scoped_ptr<QueryTracker> query_tracker_;
+ typedef std::map<GLuint, QueryTracker::Query*> QueryMap;
+ QueryMap current_queries_;
+ scoped_ptr<IdAllocator> query_id_allocator_;
+
+ scoped_ptr<BufferTracker> buffer_tracker_;
+
+ scoped_ptr<GpuMemoryBufferTracker> gpu_memory_buffer_tracker_;
+
+ GLES2ImplementationErrorMessageCallback* error_message_callback_;
+
+ scoped_ptr<std::string> current_trace_name_;
+
+ GpuControl* gpu_control_;
+
+ Capabilities capabilities_;
+
+ base::WeakPtrFactory<GLES2Implementation> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLES2Implementation);
+};
+
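+// The inline Get*Helper overloads below simply return false, i.e. none of this
+// state is cached on the client side; callers fall back to a round trip to the
+// service (see the matching Get* entry points in
+// gles2_implementation_impl_autogen.h).
+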
+inline bool GLES2Implementation::GetBufferParameterivHelper(
+ GLenum /* target */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetFramebufferAttachmentParameterivHelper(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetRenderbufferParameterivHelper(
+ GLenum /* target */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetShaderivHelper(
+ GLuint /* shader */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetTexParameterfvHelper(
+ GLenum /* target */, GLenum /* pname */, GLfloat* /* params */) {
+ return false;
+}
+
+inline bool GLES2Implementation::GetTexParameterivHelper(
+ GLenum /* target */, GLenum /* pname */, GLint* /* params */) {
+ return false;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h
new file mode 100644
index 0000000..0a53a86
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -0,0 +1,740 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_implementation.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) OVERRIDE;
+
+virtual void AttachShader(GLuint program, GLuint shader) OVERRIDE;
+
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) OVERRIDE;
+
+virtual void BindBuffer(GLenum target, GLuint buffer) OVERRIDE;
+
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) OVERRIDE;
+
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) OVERRIDE;
+
+virtual void BindTexture(GLenum target, GLuint texture) OVERRIDE;
+
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+
+virtual void BlendEquation(GLenum mode) OVERRIDE;
+
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) OVERRIDE;
+
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) OVERRIDE;
+
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) OVERRIDE;
+
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) OVERRIDE;
+
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) OVERRIDE;
+
+virtual GLenum CheckFramebufferStatus(GLenum target) OVERRIDE;
+
+virtual void Clear(GLbitfield mask) OVERRIDE;
+
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+
+virtual void ClearDepthf(GLclampf depth) OVERRIDE;
+
+virtual void ClearStencil(GLint s) OVERRIDE;
+
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) OVERRIDE;
+
+virtual void CompileShader(GLuint shader) OVERRIDE;
+
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) OVERRIDE;
+
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual GLuint CreateProgram() OVERRIDE;
+
+virtual GLuint CreateShader(GLenum type) OVERRIDE;
+
+virtual void CullFace(GLenum mode) OVERRIDE;
+
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) OVERRIDE;
+
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) OVERRIDE;
+
+virtual void DeleteProgram(GLuint program) OVERRIDE;
+
+virtual void DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) OVERRIDE;
+
+virtual void DeleteShader(GLuint shader) OVERRIDE;
+
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) OVERRIDE;
+
+virtual void DepthFunc(GLenum func) OVERRIDE;
+
+virtual void DepthMask(GLboolean flag) OVERRIDE;
+
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) OVERRIDE;
+
+virtual void DetachShader(GLuint program, GLuint shader) OVERRIDE;
+
+virtual void Disable(GLenum cap) OVERRIDE;
+
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE;
+
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) OVERRIDE;
+
+virtual void Enable(GLenum cap) OVERRIDE;
+
+virtual void Finish() OVERRIDE;
+
+virtual void Flush() OVERRIDE;
+
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) OVERRIDE;
+
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) OVERRIDE;
+
+virtual void FrontFace(GLenum mode) OVERRIDE;
+
+virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE;
+
+virtual void GenerateMipmap(GLenum target) OVERRIDE;
+
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) OVERRIDE;
+
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) OVERRIDE;
+
+virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE;
+
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) OVERRIDE;
+
+virtual GLint GetAttribLocation(GLuint program, const char* name) OVERRIDE;
+
+virtual void GetBooleanv(GLenum pname, GLboolean* params) OVERRIDE;
+
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual GLenum GetError() OVERRIDE;
+
+virtual void GetFloatv(GLenum pname, GLfloat* params) OVERRIDE;
+
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) OVERRIDE;
+
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+
+virtual const GLubyte* GetString(GLenum name) OVERRIDE;
+
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+
+virtual void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual void GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) OVERRIDE;
+
+virtual void GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) OVERRIDE;
+
+virtual GLint GetUniformLocation(GLuint program, const char* name) OVERRIDE;
+
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) OVERRIDE;
+
+virtual void Hint(GLenum target, GLenum mode) OVERRIDE;
+
+virtual GLboolean IsBuffer(GLuint buffer) OVERRIDE;
+
+virtual GLboolean IsEnabled(GLenum cap) OVERRIDE;
+
+virtual GLboolean IsFramebuffer(GLuint framebuffer) OVERRIDE;
+
+virtual GLboolean IsProgram(GLuint program) OVERRIDE;
+
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) OVERRIDE;
+
+virtual GLboolean IsShader(GLuint shader) OVERRIDE;
+
+virtual GLboolean IsTexture(GLuint texture) OVERRIDE;
+
+virtual void LineWidth(GLfloat width) OVERRIDE;
+
+virtual void LinkProgram(GLuint program) OVERRIDE;
+
+virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE;
+
+virtual void PolygonOffset(GLfloat factor, GLfloat units) OVERRIDE;
+
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) OVERRIDE;
+
+virtual void ReleaseShaderCompiler() OVERRIDE;
+
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void SampleCoverage(GLclampf value, GLboolean invert) OVERRIDE;
+
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) OVERRIDE;
+
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) OVERRIDE;
+
+virtual void ShallowFinishCHROMIUM() OVERRIDE;
+
+virtual void ShallowFlushCHROMIUM() OVERRIDE;
+
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) OVERRIDE;
+
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) OVERRIDE;
+
+virtual void StencilMask(GLuint mask) OVERRIDE;
+
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) OVERRIDE;
+
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) OVERRIDE;
+
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) OVERRIDE;
+
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) OVERRIDE;
+
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) OVERRIDE;
+
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE;
+
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) OVERRIDE;
+
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+
+virtual void Uniform1f(GLint location, GLfloat x) OVERRIDE;
+
+virtual void Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform1i(GLint location, GLint x) OVERRIDE;
+
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) OVERRIDE;
+
+virtual void Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform2i(GLint location, GLint x, GLint y) OVERRIDE;
+
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+
+virtual void Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) OVERRIDE;
+
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+
+virtual void Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+
+virtual void Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) OVERRIDE;
+
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+
+virtual void UseProgram(GLuint program) OVERRIDE;
+
+virtual void ValidateProgram(GLuint program) OVERRIDE;
+
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) OVERRIDE;
+
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) OVERRIDE;
+
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) OVERRIDE;
+
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) OVERRIDE;
+
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) OVERRIDE;
+
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) OVERRIDE;
+
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE;
+
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) OVERRIDE;
+
+virtual GLboolean IsQueryEXT(GLuint id) OVERRIDE;
+
+virtual void BeginQueryEXT(GLenum target, GLuint id) OVERRIDE;
+
+virtual void EndQueryEXT(GLenum target) OVERRIDE;
+
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) OVERRIDE;
+
+virtual void GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) OVERRIDE;
+
+virtual void InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) OVERRIDE;
+
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) OVERRIDE;
+
+virtual void PopGroupMarkerEXT() OVERRIDE;
+
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) OVERRIDE;
+
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) OVERRIDE;
+
+virtual GLboolean IsVertexArrayOES(GLuint array) OVERRIDE;
+
+virtual void BindVertexArrayOES(GLuint array) OVERRIDE;
+
+virtual void SwapBuffers() OVERRIDE;
+
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) OVERRIDE;
+
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) OVERRIDE;
+
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) OVERRIDE;
+
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) OVERRIDE;
+
+virtual void* MapImageCHROMIUM(GLuint image_id) OVERRIDE;
+
+virtual void UnmapImageCHROMIUM(GLuint image_id) OVERRIDE;
+
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) OVERRIDE;
+
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) OVERRIDE;
+
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) OVERRIDE;
+
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) OVERRIDE;
+
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) OVERRIDE;
+
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() OVERRIDE;
+
+virtual void RequestExtensionCHROMIUM(const char* extension) OVERRIDE;
+
+virtual void RateLimitOffscreenContextCHROMIUM() OVERRIDE;
+
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) OVERRIDE;
+
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) OVERRIDE;
+
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) OVERRIDE;
+
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+
+virtual void DestroyImageCHROMIUM(GLuint image_id) OVERRIDE;
+
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) OVERRIDE;
+
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) OVERRIDE;
+
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) OVERRIDE;
+
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) OVERRIDE;
+
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) OVERRIDE;
+
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) OVERRIDE;
+
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE;
+
+virtual void ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual void ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) OVERRIDE;
+
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+
+virtual void TraceBeginCHROMIUM(const char* name) OVERRIDE;
+
+virtual void TraceEndCHROMIUM() OVERRIDE;
+
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) OVERRIDE;
+
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) OVERRIDE;
+
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) OVERRIDE;
+
+virtual GLuint InsertSyncPointCHROMIUM() OVERRIDE;
+
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) OVERRIDE;
+
+virtual void DiscardBackbufferCHROMIUM() OVERRIDE;
+
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) OVERRIDE;
+
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) OVERRIDE;
+
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) OVERRIDE;
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
new file mode 100644
index 0000000..e63ba63
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -0,0 +1,2165 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_implementation.cc to define the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
+
+void GLES2Implementation::AttachShader(GLuint program, GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glAttachShader(" << program << ", "
+ << shader << ")");
+ helper_->AttachShader(program, shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::BindBuffer(GLenum target, GLuint buffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindBuffer("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << buffer << ")");
+ if (IsBufferReservedId(buffer)) {
+ SetGLError(GL_INVALID_OPERATION, "BindBuffer", "buffer reserved id");
+ return;
+ }
+ if (BindBufferHelper(target, buffer)) {
+ helper_->BindBuffer(target, buffer);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BindFramebuffer(GLenum target, GLuint framebuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindFramebuffer("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << framebuffer << ")");
+ if (IsFramebufferReservedId(framebuffer)) {
+ SetGLError(
+ GL_INVALID_OPERATION, "BindFramebuffer", "framebuffer reserved id");
+ return;
+ }
+ if (BindFramebufferHelper(target, framebuffer)) {
+ helper_->BindFramebuffer(target, framebuffer);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BindRenderbuffer(GLenum target, GLuint renderbuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindRenderbuffer("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << renderbuffer << ")");
+ if (IsRenderbufferReservedId(renderbuffer)) {
+ SetGLError(
+ GL_INVALID_OPERATION, "BindRenderbuffer", "renderbuffer reserved id");
+ return;
+ }
+ if (BindRenderbufferHelper(target, renderbuffer)) {
+ helper_->BindRenderbuffer(target, renderbuffer);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BindTexture(GLenum target, GLuint texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindTexture("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << texture << ")");
+ if (IsTextureReservedId(texture)) {
+ SetGLError(GL_INVALID_OPERATION, "BindTexture", "texture reserved id");
+ return;
+ }
+ if (BindTextureHelper(target, texture)) {
+ helper_->BindTexture(target, texture);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendColor(" << red << ", "
+ << green << ", " << blue << ", " << alpha << ")");
+ helper_->BlendColor(red, green, blue, alpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendEquation(GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquation("
+ << GLES2Util::GetStringEquation(mode) << ")");
+ helper_->BlendEquation(mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendEquationSeparate(GLenum modeRGB,
+ GLenum modeAlpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquationSeparate("
+ << GLES2Util::GetStringEquation(modeRGB) << ", "
+ << GLES2Util::GetStringEquation(modeAlpha) << ")");
+ helper_->BlendEquationSeparate(modeRGB, modeAlpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendFunc(GLenum sfactor, GLenum dfactor) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFunc("
+ << GLES2Util::GetStringSrcBlendFactor(sfactor) << ", "
+ << GLES2Util::GetStringDstBlendFactor(dfactor) << ")");
+ helper_->BlendFunc(sfactor, dfactor);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFuncSeparate("
+ << GLES2Util::GetStringSrcBlendFactor(srcRGB) << ", "
+ << GLES2Util::GetStringDstBlendFactor(dstRGB) << ", "
+ << GLES2Util::GetStringSrcBlendFactor(srcAlpha) << ", "
+ << GLES2Util::GetStringDstBlendFactor(dstAlpha) << ")");
+ helper_->BlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+ CheckGLError();
+}
+
+GLenum GLES2Implementation::CheckFramebufferStatus(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::CheckFramebufferStatus");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCheckFramebufferStatus("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ")");
+ typedef cmds::CheckFramebufferStatus::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FRAMEBUFFER_UNSUPPORTED;
+ }
+ *result = 0;
+ helper_->CheckFramebufferStatus(
+ target, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLenum result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
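+
+// The scalar queries in this file (Get*iv/Get*fv, Is*, CheckFramebufferStatus)
+// share the pattern above: a Result slot is reserved in the transfer buffer
+// with GetResultAs<>(), the command is issued with that slot's shared-memory
+// id and offset, WaitForCmd() blocks until the service has written the value,
+// and the result is then read back from the slot.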
+
+void GLES2Implementation::Clear(GLbitfield mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClear(" << mask << ")");
+ helper_->Clear(mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClearColor(" << red << ", "
+ << green << ", " << blue << ", " << alpha << ")");
+ helper_->ClearColor(red, green, blue, alpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::ClearDepthf(GLclampf depth) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClearDepthf(" << depth << ")");
+ helper_->ClearDepthf(depth);
+ CheckGLError();
+}
+
+void GLES2Implementation::ClearStencil(GLint s) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glClearStencil(" << s << ")");
+ helper_->ClearStencil(s);
+ CheckGLError();
+}
+
+void GLES2Implementation::ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glColorMask("
+ << GLES2Util::GetStringBool(red) << ", "
+ << GLES2Util::GetStringBool(green) << ", "
+ << GLES2Util::GetStringBool(blue) << ", "
+ << GLES2Util::GetStringBool(alpha) << ")");
+ helper_->ColorMask(red, green, blue, alpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::CompileShader(GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCompileShader(" << shader
+ << ")");
+ helper_->CompileShader(shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glCopyTexImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", " << level << ", "
+ << GLES2Util::GetStringTextureInternalFormat(internalformat) << ", "
+ << x << ", " << y << ", " << width << ", " << height << ", " << border
+ << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexImage2D", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexImage2D", "height < 0");
+ return;
+ }
+ if (border != 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexImage2D", "border GL_INVALID_VALUE");
+ return;
+ }
+ helper_->CopyTexImage2D(target, level, internalformat, x, y, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopyTexSubImage2D("
+ << GLES2Util::GetStringTextureTarget(target) << ", "
+ << level << ", " << xoffset << ", " << yoffset << ", " << x
+ << ", " << y << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexSubImage2D", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glCopyTexSubImage2D", "height < 0");
+ return;
+ }
+ helper_->CopyTexSubImage2D(
+ target, level, xoffset, yoffset, x, y, width, height);
+ CheckGLError();
+}
+
+GLuint GLES2Implementation::CreateProgram() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateProgram("
+ << ")");
+ GLuint client_id;
+ GetIdHandler(id_namespaces::kProgramsAndShaders)
+ ->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateProgram(client_id);
+ GPU_CLIENT_LOG("returned " << client_id);
+ CheckGLError();
+ return client_id;
+}
+
+GLuint GLES2Implementation::CreateShader(GLenum type) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCreateShader("
+ << GLES2Util::GetStringShaderType(type) << ")");
+ GLuint client_id;
+ GetIdHandler(id_namespaces::kProgramsAndShaders)
+ ->MakeIds(this, 0, 1, &client_id);
+ helper_->CreateShader(type, client_id);
+ GPU_CLIENT_LOG("returned " << client_id);
+ CheckGLError();
+ return client_id;
+}
+
+void GLES2Implementation::CullFace(GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCullFace("
+ << GLES2Util::GetStringFaceType(mode) << ")");
+ helper_->CullFace(mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteBuffers(GLsizei n, const GLuint* buffers) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteBuffers(" << n << ", "
+ << static_cast<const void*>(buffers) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << buffers[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(buffers[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteBuffers", "n < 0");
+ return;
+ }
+ DeleteBuffersHelper(n, buffers);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteFramebuffers(GLsizei n,
+ const GLuint* framebuffers) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteFramebuffers(" << n << ", "
+ << static_cast<const void*>(framebuffers) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << framebuffers[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(framebuffers[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteFramebuffers", "n < 0");
+ return;
+ }
+ DeleteFramebuffersHelper(n, framebuffers);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteProgram(" << program
+ << ")");
+ GPU_CLIENT_DCHECK(program != 0);
+ DeleteProgramHelper(program);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteRenderbuffers(" << n
+ << ", " << static_cast<const void*>(renderbuffers) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << renderbuffers[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(renderbuffers[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteRenderbuffers", "n < 0");
+ return;
+ }
+ DeleteRenderbuffersHelper(n, renderbuffers);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteShader(GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteShader(" << shader << ")");
+ GPU_CLIENT_DCHECK(shader != 0);
+ DeleteShaderHelper(shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteTextures(GLsizei n, const GLuint* textures) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteTextures(" << n << ", "
+ << static_cast<const void*>(textures) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << textures[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(textures[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteTextures", "n < 0");
+ return;
+ }
+ DeleteTexturesHelper(n, textures);
+ CheckGLError();
+}
+
+void GLES2Implementation::DepthFunc(GLenum func) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDepthFunc("
+ << GLES2Util::GetStringCmpFunction(func) << ")");
+ helper_->DepthFunc(func);
+ CheckGLError();
+}
+
+void GLES2Implementation::DepthMask(GLboolean flag) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDepthMask("
+ << GLES2Util::GetStringBool(flag) << ")");
+ helper_->DepthMask(flag);
+ CheckGLError();
+}
+
+void GLES2Implementation::DepthRangef(GLclampf zNear, GLclampf zFar) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDepthRangef(" << zNear << ", "
+ << zFar << ")");
+ helper_->DepthRangef(zNear, zFar);
+ CheckGLError();
+}
+
+void GLES2Implementation::DetachShader(GLuint program, GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDetachShader(" << program << ", "
+ << shader << ")");
+ helper_->DetachShader(program, shader);
+ CheckGLError();
+}
+
+void GLES2Implementation::FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFramebufferRenderbuffer("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringRenderBufferTarget(
+ renderbuffertarget) << ", " << renderbuffer << ")");
+ helper_->FramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+ CheckGLError();
+}
+
+void GLES2Implementation::FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFramebufferTexture2D("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringTextureTarget(textarget) << ", "
+ << texture << ", " << level << ")");
+ if (level != 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glFramebufferTexture2D", "level GL_INVALID_VALUE");
+ return;
+ }
+ helper_->FramebufferTexture2D(target, attachment, textarget, texture);
+ CheckGLError();
+}
+
+void GLES2Implementation::FrontFace(GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glFrontFace("
+ << GLES2Util::GetStringFaceMode(mode) << ")");
+ helper_->FrontFace(mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenBuffers(GLsizei n, GLuint* buffers) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenBuffers(" << n << ", "
+ << static_cast<const void*>(buffers) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenBuffers", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kBuffers)->MakeIds(this, 0, n, buffers);
+ GenBuffersHelper(n, buffers);
+ helper_->GenBuffersImmediate(n, buffers);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << buffers[i]);
+ }
+ });
+ CheckGLError();
+}
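+
+// The Gen* calls follow the pattern above: ids are allocated client side
+// through an IdHandler (one per id namespace), the local bookkeeping helper is
+// updated, and a Gen*Immediate command informs the service, with an extra
+// Flush when bind_generates_resource() is set.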
+
+void GLES2Implementation::GenerateMipmap(GLenum target) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenerateMipmap("
+ << GLES2Util::GetStringTextureBindTarget(target) << ")");
+ helper_->GenerateMipmap(target);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenFramebuffers(GLsizei n, GLuint* framebuffers) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenFramebuffers(" << n << ", "
+ << static_cast<const void*>(framebuffers) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenFramebuffers", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kFramebuffers)->MakeIds(this, 0, n, framebuffers);
+ GenFramebuffersHelper(n, framebuffers);
+ helper_->GenFramebuffersImmediate(n, framebuffers);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << framebuffers[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GenRenderbuffers(GLsizei n, GLuint* renderbuffers) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenRenderbuffers(" << n << ", "
+ << static_cast<const void*>(renderbuffers) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenRenderbuffers", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kRenderbuffers)
+ ->MakeIds(this, 0, n, renderbuffers);
+ GenRenderbuffersHelper(n, renderbuffers);
+ helper_->GenRenderbuffersImmediate(n, renderbuffers);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << renderbuffers[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GenTextures(GLsizei n, GLuint* textures) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenTextures(" << n << ", "
+ << static_cast<const void*>(textures) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenTextures", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kTextures)->MakeIds(this, 0, n, textures);
+ GenTexturesHelper(n, textures);
+ helper_->GenTexturesImmediate(n, textures);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << textures[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::GetBooleanv(GLenum pname, GLboolean* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLboolean, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetBooleanv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetBooleanv");
+ if (GetBooleanvHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetBooleanv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetBooleanv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetBufferParameteriv("
+ << GLES2Util::GetStringBufferTarget(target) << ", "
+ << GLES2Util::GetStringBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetBufferParameteriv");
+ if (GetBufferParameterivHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetBufferParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetBufferParameteriv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetFloatv(GLenum pname, GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetFloatv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetFloatv");
+ if (GetFloatvHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetFloatv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetFloatv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glGetFramebufferAttachmentParameteriv("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringFrameBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu",
+ "GLES2Implementation::GetFramebufferAttachmentParameteriv");
+ if (GetFramebufferAttachmentParameterivHelper(
+ target, attachment, pname, params)) {
+ return;
+ }
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetFramebufferAttachmentParameteriv(
+ target, attachment, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetIntegerv(GLenum pname, GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetIntegerv("
+ << GLES2Util::GetStringGLState(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetIntegerv");
+ if (GetIntegervHelper(pname, params)) {
+ return;
+ }
+ typedef cmds::GetIntegerv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetIntegerv(pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetProgramiv(GLuint program,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramiv(" << program << ", "
+ << GLES2Util::GetStringProgramParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetProgramiv");
+ if (GetProgramivHelper(program, pname, params)) {
+ return;
+ }
+ typedef cmds::GetProgramiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetProgramiv(program, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetProgramInfoLog"
+ << "(" << program << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(infolog) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetProgramInfoLog(program, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(infolog, str.c_str(), max_size);
+ infolog[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << infolog << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
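+
+// String results (info logs, shader source) come back through a "bucket"
+// rather than a fixed-size Result slot: the bucket is cleared with
+// SetBucketSize(kResultBucketId, 0), the command fills it on the service side,
+// GetBucketAsString() copies it back, and the string is then truncated to the
+// caller's bufsize with an explicit NUL terminator.
+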
+void GLES2Implementation::GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetRenderbufferParameteriv("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << GLES2Util::GetStringRenderBufferParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetRenderbufferParameteriv");
+ if (GetRenderbufferParameterivHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetRenderbufferParameteriv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetShaderiv(GLuint shader,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderiv(" << shader << ", "
+ << GLES2Util::GetStringShaderParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetShaderiv");
+ if (GetShaderivHelper(shader, pname, params)) {
+ return;
+ }
+ typedef cmds::GetShaderiv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetShaderiv(shader, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderInfoLog"
+ << "(" << shader << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(infolog) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetShaderInfoLog(shader, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(infolog, str.c_str(), max_size);
+ infolog[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << infolog << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+void GLES2Implementation::GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetShaderSource"
+ << "(" << shader << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(source) << ")");
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetShaderSource(shader, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(source, str.c_str(), max_size);
+ source[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << source << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+void GLES2Implementation::GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetTexParameterfv("
+ << GLES2Util::GetStringGetTexParamTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetTexParameterfv");
+ if (GetTexParameterfvHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetTexParameterfv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetTexParameterfv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLint, params);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetTexParameteriv("
+ << GLES2Util::GetStringGetTexParamTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetTexParameteriv");
+ if (GetTexParameterivHelper(target, pname, params)) {
+ return;
+ }
+ typedef cmds::GetTexParameteriv::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetTexParameteriv(
+ target, pname, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ result->CopyResult(params);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
+void GLES2Implementation::Hint(GLenum target, GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glHint("
+ << GLES2Util::GetStringHintTarget(target) << ", "
+ << GLES2Util::GetStringHintMode(mode) << ")");
+ helper_->Hint(target, mode);
+ CheckGLError();
+}
+
+GLboolean GLES2Implementation::IsBuffer(GLuint buffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsBuffer");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsBuffer(" << buffer << ")");
+ typedef cmds::IsBuffer::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsBuffer(buffer, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsFramebuffer(GLuint framebuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsFramebuffer");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsFramebuffer(" << framebuffer
+ << ")");
+ typedef cmds::IsFramebuffer::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsFramebuffer(framebuffer, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsProgram");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsProgram(" << program << ")");
+ typedef cmds::IsProgram::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsProgram(program, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsRenderbuffer(GLuint renderbuffer) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsRenderbuffer");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsRenderbuffer(" << renderbuffer
+ << ")");
+ typedef cmds::IsRenderbuffer::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsRenderbuffer(renderbuffer, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsShader(GLuint shader) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsShader");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsShader(" << shader << ")");
+ typedef cmds::IsShader::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsShader(shader, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+GLboolean GLES2Implementation::IsTexture(GLuint texture) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsTexture");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsTexture(" << texture << ")");
+ typedef cmds::IsTexture::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsTexture(texture, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+void GLES2Implementation::LineWidth(GLfloat width) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLineWidth(" << width << ")");
+ helper_->LineWidth(width);
+ CheckGLError();
+}
+
+void GLES2Implementation::PolygonOffset(GLfloat factor, GLfloat units) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glPolygonOffset(" << factor << ", "
+ << units << ")");
+ helper_->PolygonOffset(factor, units);
+ CheckGLError();
+}
+
+void GLES2Implementation::ReleaseShaderCompiler() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glReleaseShaderCompiler("
+ << ")");
+ helper_->ReleaseShaderCompiler();
+ CheckGLError();
+}
+
+void GLES2Implementation::RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glRenderbufferStorage("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", "
+ << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorage", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glRenderbufferStorage", "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorage(target, internalformat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::SampleCoverage(GLclampf value, GLboolean invert) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glSampleCoverage(" << value << ", "
+ << GLES2Util::GetStringBool(invert) << ")");
+ helper_->SampleCoverage(value, invert);
+ CheckGLError();
+}
+
+void GLES2Implementation::Scissor(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glScissor(" << x << ", " << y
+ << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glScissor", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glScissor", "height < 0");
+ return;
+ }
+ helper_->Scissor(x, y, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilFunc(GLenum func, GLint ref, GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilFunc("
+ << GLES2Util::GetStringCmpFunction(func) << ", " << ref
+ << ", " << mask << ")");
+ helper_->StencilFunc(func, ref, mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilFuncSeparate("
+ << GLES2Util::GetStringFaceType(face) << ", "
+ << GLES2Util::GetStringCmpFunction(func) << ", " << ref
+ << ", " << mask << ")");
+ helper_->StencilFuncSeparate(face, func, ref, mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilMask(GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilMask(" << mask << ")");
+ helper_->StencilMask(mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilMaskSeparate(GLenum face, GLuint mask) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilMaskSeparate("
+ << GLES2Util::GetStringFaceType(face) << ", " << mask
+ << ")");
+ helper_->StencilMaskSeparate(face, mask);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilOp(GLenum fail, GLenum zfail, GLenum zpass) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilOp("
+ << GLES2Util::GetStringStencilOp(fail) << ", "
+ << GLES2Util::GetStringStencilOp(zfail) << ", "
+ << GLES2Util::GetStringStencilOp(zpass) << ")");
+ helper_->StencilOp(fail, zfail, zpass);
+ CheckGLError();
+}
+
+void GLES2Implementation::StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glStencilOpSeparate("
+ << GLES2Util::GetStringFaceType(face) << ", "
+ << GLES2Util::GetStringStencilOp(fail) << ", "
+ << GLES2Util::GetStringStencilOp(zfail) << ", "
+ << GLES2Util::GetStringStencilOp(zpass) << ")");
+ helper_->StencilOpSeparate(face, fail, zfail, zpass);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameterf(GLenum target,
+ GLenum pname,
+ GLfloat param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameterf("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << param << ")");
+ helper_->TexParameterf(target, pname, param);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameterfv("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ GPU_CLIENT_LOG("values: " << params[0]);
+ helper_->TexParameterfvImmediate(target, pname, params);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameteri(GLenum target,
+ GLenum pname,
+ GLint param) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameteri("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << param << ")");
+ helper_->TexParameteri(target, pname, param);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexParameteriv("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << GLES2Util::GetStringTextureParameter(pname) << ", "
+ << static_cast<const void*>(params) << ")");
+ GPU_CLIENT_LOG("values: " << params[0]);
+ helper_->TexParameterivImmediate(target, pname, params);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1f(GLint location, GLfloat x) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1f(" << location << ", "
+ << x << ")");
+ helper_->Uniform1f(location, x);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform1fv", "count < 0");
+ return;
+ }
+ helper_->Uniform1fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1i(GLint location, GLint x) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1i(" << location << ", "
+ << x << ")");
+ helper_->Uniform1i(location, x);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform1iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform1iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform1iv", "count < 0");
+ return;
+ }
+ helper_->Uniform1ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2f(" << location << ", "
+ << x << ", " << y << ")");
+ helper_->Uniform2f(location, x, y);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 2] << ", " << v[1 + i * 2]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform2fv", "count < 0");
+ return;
+ }
+ helper_->Uniform2fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2i(GLint location, GLint x, GLint y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2i(" << location << ", "
+ << x << ", " << y << ")");
+ helper_->Uniform2i(location, x, y);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform2iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform2iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 2] << ", " << v[1 + i * 2]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform2iv", "count < 0");
+ return;
+ }
+ helper_->Uniform2ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3f(" << location << ", "
+ << x << ", " << y << ", " << z << ")");
+ helper_->Uniform3f(location, x, y, z);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 3] << ", " << v[1 + i * 3]
+ << ", " << v[2 + i * 3]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform3fv", "count < 0");
+ return;
+ }
+ helper_->Uniform3fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3i(GLint location, GLint x, GLint y, GLint z) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3i(" << location << ", "
+ << x << ", " << y << ", " << z << ")");
+ helper_->Uniform3i(location, x, y, z);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform3iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform3iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 3] << ", " << v[1 + i * 3]
+ << ", " << v[2 + i * 3]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform3iv", "count < 0");
+ return;
+ }
+ helper_->Uniform3ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4f(" << location << ", "
+ << x << ", " << y << ", " << z << ", " << w << ")");
+ helper_->Uniform4f(location, x, y, z, w);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4fv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 4] << ", " << v[1 + i * 4]
+ << ", " << v[2 + i * 4] << ", " << v[3 + i * 4]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform4fv", "count < 0");
+ return;
+ }
+ helper_->Uniform4fvImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4i(" << location << ", "
+ << x << ", " << y << ", " << z << ", " << w << ")");
+ helper_->Uniform4i(location, x, y, z, w);
+ CheckGLError();
+}
+
+void GLES2Implementation::Uniform4iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniform4iv(" << location << ", "
+ << count << ", " << static_cast<const void*>(v) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << v[0 + i * 4] << ", " << v[1 + i * 4]
+ << ", " << v[2 + i * 4] << ", " << v[3 + i * 4]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniform4iv", "count < 0");
+ return;
+ }
+ helper_->Uniform4ivImmediate(location, count, v);
+ CheckGLError();
+}
+
+void GLES2Implementation::UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniformMatrix2fv(" << location
+ << ", " << count << ", "
+ << GLES2Util::GetStringBool(transpose) << ", "
+ << static_cast<const void*>(value) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << value[0 + i * 4] << ", "
+ << value[1 + i * 4] << ", " << value[2 + i * 4]
+ << ", " << value[3 + i * 4]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniformMatrix2fv", "count < 0");
+ return;
+ }
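+  // ES 2.0 requires |transpose| to be GL_FALSE for glUniformMatrix*fv, so a
+  // true value is rejected on the client with GL_INVALID_VALUE instead of
+  // being sent to the service.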
+ if (transpose != false) {
+ SetGLError(
+ GL_INVALID_VALUE, "glUniformMatrix2fv", "transpose GL_INVALID_VALUE");
+ return;
+ }
+ helper_->UniformMatrix2fvImmediate(location, count, value);
+ CheckGLError();
+}
+
+void GLES2Implementation::UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniformMatrix3fv(" << location
+ << ", " << count << ", "
+ << GLES2Util::GetStringBool(transpose) << ", "
+ << static_cast<const void*>(value) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << value[0 + i * 9] << ", "
+ << value[1 + i * 9] << ", " << value[2 + i * 9]
+ << ", " << value[3 + i * 9] << ", "
+ << value[4 + i * 9] << ", " << value[5 + i * 9]
+ << ", " << value[6 + i * 9] << ", "
+ << value[7 + i * 9] << ", " << value[8 + i * 9]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniformMatrix3fv", "count < 0");
+ return;
+ }
+ if (transpose != false) {
+ SetGLError(
+ GL_INVALID_VALUE, "glUniformMatrix3fv", "transpose GL_INVALID_VALUE");
+ return;
+ }
+ helper_->UniformMatrix3fvImmediate(location, count, value);
+ CheckGLError();
+}
+
+void GLES2Implementation::UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUniformMatrix4fv(" << location
+ << ", " << count << ", "
+ << GLES2Util::GetStringBool(transpose) << ", "
+ << static_cast<const void*>(value) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(
+ " " << i << ": " << value[0 + i * 16] << ", " << value[1 + i * 16]
+ << ", " << value[2 + i * 16] << ", " << value[3 + i * 16] << ", "
+ << value[4 + i * 16] << ", " << value[5 + i * 16] << ", "
+ << value[6 + i * 16] << ", " << value[7 + i * 16] << ", "
+ << value[8 + i * 16] << ", " << value[9 + i * 16] << ", "
+ << value[10 + i * 16] << ", " << value[11 + i * 16] << ", "
+ << value[12 + i * 16] << ", " << value[13 + i * 16] << ", "
+ << value[14 + i * 16] << ", " << value[15 + i * 16]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glUniformMatrix4fv", "count < 0");
+ return;
+ }
+ if (transpose != false) {
+ SetGLError(
+ GL_INVALID_VALUE, "glUniformMatrix4fv", "transpose GL_INVALID_VALUE");
+ return;
+ }
+ helper_->UniformMatrix4fvImmediate(location, count, value);
+ CheckGLError();
+}
+
+void GLES2Implementation::UseProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glUseProgram(" << program << ")");
+ if (IsProgramReservedId(program)) {
+ SetGLError(GL_INVALID_OPERATION, "UseProgram", "program reserved id");
+ return;
+ }
+ if (UseProgramHelper(program)) {
+ helper_->UseProgram(program);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::ValidateProgram(GLuint program) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glValidateProgram(" << program
+ << ")");
+ helper_->ValidateProgram(program);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib1f(GLuint indx, GLfloat x) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib1f(" << indx << ", "
+ << x << ")");
+ helper_->VertexAttrib1f(indx, x);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib1fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib1fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0]);
+ helper_->VertexAttrib1fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib2f(" << indx << ", "
+ << x << ", " << y << ")");
+ helper_->VertexAttrib2f(indx, x, y);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib2fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib2fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0] << ", " << values[1]);
+ helper_->VertexAttrib2fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib3f(" << indx << ", "
+ << x << ", " << y << ", " << z << ")");
+ helper_->VertexAttrib3f(indx, x, y, z);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib3fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib3fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0] << ", " << values[1] << ", "
+ << values[2]);
+ helper_->VertexAttrib3fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib4f(" << indx << ", "
+ << x << ", " << y << ", " << z << ", " << w << ")");
+ helper_->VertexAttrib4f(indx, x, y, z, w);
+ CheckGLError();
+}
+
+void GLES2Implementation::VertexAttrib4fv(GLuint indx, const GLfloat* values) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glVertexAttrib4fv(" << indx << ", "
+ << static_cast<const void*>(values) << ")");
+ GPU_CLIENT_LOG("values: " << values[0] << ", " << values[1] << ", "
+ << values[2] << ", " << values[3]);
+ helper_->VertexAttrib4fvImmediate(indx, values);
+ CheckGLError();
+}
+
+void GLES2Implementation::Viewport(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glViewport(" << x << ", " << y
+ << ", " << width << ", " << height << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glViewport", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glViewport", "height < 0");
+ return;
+ }
+ helper_->Viewport(x, y, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlitFramebufferCHROMIUM("
+ << srcX0 << ", " << srcY0 << ", " << srcX1 << ", " << srcY1
+ << ", " << dstX0 << ", " << dstY0 << ", " << dstX1 << ", "
+ << dstY1 << ", " << mask << ", "
+ << GLES2Util::GetStringBlitFilter(filter) << ")");
+ helper_->BlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+ CheckGLError();
+}
+
+void GLES2Implementation::RenderbufferStorageMultisampleCHROMIUM(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glRenderbufferStorageMultisampleCHROMIUM("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", " << samples
+ << ", " << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (samples < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "samples < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glRenderbufferStorageMultisampleCHROMIUM",
+ "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::RenderbufferStorageMultisampleEXT(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glRenderbufferStorageMultisampleEXT("
+ << GLES2Util::GetStringRenderBufferTarget(target) << ", " << samples
+ << ", " << GLES2Util::GetStringRenderBufferFormat(internalformat)
+ << ", " << width << ", " << height << ")");
+ if (samples < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "samples < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(
+ GL_INVALID_VALUE, "glRenderbufferStorageMultisampleEXT", "height < 0");
+ return;
+ }
+ helper_->RenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glFramebufferTexture2DMultisampleEXT("
+ << GLES2Util::GetStringFrameBufferTarget(target) << ", "
+ << GLES2Util::GetStringAttachment(attachment) << ", "
+ << GLES2Util::GetStringTextureTarget(textarget) << ", "
+ << texture << ", " << level << ", " << samples << ")");
+ if (level != 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisampleEXT",
+ "level GL_INVALID_VALUE");
+ return;
+ }
+ if (samples < 0) {
+ SetGLError(GL_INVALID_VALUE,
+ "glFramebufferTexture2DMultisampleEXT",
+ "samples < 0");
+ return;
+ }
+ helper_->FramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget, texture, samples);
+ CheckGLError();
+}
+
+void GLES2Implementation::TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glTexStorage2DEXT("
+ << GLES2Util::GetStringTextureTarget(target) << ", " << levels << ", "
+ << GLES2Util::GetStringTextureInternalFormatStorage(internalFormat)
+ << ", " << width << ", " << height << ")");
+ if (levels < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexStorage2DEXT", "levels < 0");
+ return;
+ }
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexStorage2DEXT", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexStorage2DEXT", "height < 0");
+ return;
+ }
+ helper_->TexStorage2DEXT(target, levels, internalFormat, width, height);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenQueriesEXT(GLsizei n, GLuint* queries) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenQueriesEXT(" << n << ", "
+ << static_cast<const void*>(queries) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenQueriesEXT", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ IdAllocator* id_allocator = GetIdAllocator(id_namespaces::kQueries);
+ for (GLsizei ii = 0; ii < n; ++ii)
+ queries[ii] = id_allocator->AllocateID();
+ GenQueriesEXTHelper(n, queries);
+ helper_->GenQueriesEXTImmediate(n, queries);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << queries[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteQueriesEXT(GLsizei n, const GLuint* queries) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteQueriesEXT(" << n << ", "
+ << static_cast<const void*>(queries) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << queries[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(queries[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteQueriesEXT", "n < 0");
+ return;
+ }
+ DeleteQueriesEXTHelper(n, queries);
+ CheckGLError();
+}
+
+void GLES2Implementation::GenVertexArraysOES(GLsizei n, GLuint* arrays) {
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGenVertexArraysOES(" << n << ", "
+ << static_cast<const void*>(arrays) << ")");
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glGenVertexArraysOES", "n < 0");
+ return;
+ }
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GetIdHandler(id_namespaces::kVertexArrays)->MakeIds(this, 0, n, arrays);
+ GenVertexArraysOESHelper(n, arrays);
+ helper_->GenVertexArraysOESImmediate(n, arrays);
+ if (share_group_->bind_generates_resource())
+ helper_->CommandBufferHelper::Flush();
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << arrays[i]);
+ }
+ });
+ CheckGLError();
+}
+
+void GLES2Implementation::DeleteVertexArraysOES(GLsizei n,
+ const GLuint* arrays) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDeleteVertexArraysOES(" << n
+ << ", " << static_cast<const void*>(arrays) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << arrays[i]);
+ }
+ });
+ GPU_CLIENT_DCHECK_CODE_BLOCK({
+ for (GLsizei i = 0; i < n; ++i) {
+ DCHECK(arrays[i] != 0);
+ }
+ });
+ if (n < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDeleteVertexArraysOES", "n < 0");
+ return;
+ }
+ DeleteVertexArraysOESHelper(n, arrays);
+ CheckGLError();
+}
+
+GLboolean GLES2Implementation::IsVertexArrayOES(GLuint array) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ TRACE_EVENT0("gpu", "GLES2Implementation::IsVertexArrayOES");
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsVertexArrayOES(" << array
+ << ")");
+ typedef cmds::IsVertexArrayOES::Result Result;
+ Result* result = GetResultAs<Result*>();
+ if (!result) {
+ return GL_FALSE;
+ }
+ *result = 0;
+ helper_->IsVertexArrayOES(array, GetResultShmId(), GetResultShmOffset());
+ WaitForCmd();
+ GLboolean result_value = *result;
+ GPU_CLIENT_LOG("returned " << result_value);
+ CheckGLError();
+ return result_value;
+}
+
+void GLES2Implementation::BindVertexArrayOES(GLuint array) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindVertexArrayOES(" << array
+ << ")");
+ if (IsVertexArrayReservedId(array)) {
+ SetGLError(GL_INVALID_OPERATION, "BindVertexArrayOES", "array reserved id");
+ return;
+ }
+ if (BindVertexArrayOESHelper(array)) {
+ helper_->BindVertexArrayOES(array);
+ }
+ CheckGLError();
+}
+
+void GLES2Implementation::GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_OPTIONAL_INITALIZATION(GLsizei, length);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetTranslatedShaderSourceANGLE"
+ << "(" << shader << ", " << bufsize << ", "
+ << static_cast<void*>(length) << ", "
+ << static_cast<void*>(source) << ")");
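+  // The translated source is returned through a result bucket: clear the
+  // bucket, let the service fill it, then copy at most |bufsize| - 1 bytes
+  // into |source| and NUL-terminate.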
+ helper_->SetBucketSize(kResultBucketId, 0);
+ helper_->GetTranslatedShaderSourceANGLE(shader, kResultBucketId);
+ std::string str;
+ GLsizei max_size = 0;
+ if (GetBucketAsString(kResultBucketId, &str)) {
+ if (bufsize > 0) {
+ max_size = std::min(static_cast<size_t>(bufsize) - 1, str.size());
+ memcpy(source, str.c_str(), max_size);
+ source[max_size] = '\0';
+ GPU_CLIENT_LOG("------\n" << source << "\n------");
+ }
+ }
+ if (length != NULL) {
+ *length = max_size;
+ }
+ CheckGLError();
+}
+void GLES2Implementation::TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTexImageIOSurface2DCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << width << ", " << height << ", " << ioSurfaceId << ", "
+ << plane << ")");
+ if (width < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "width < 0");
+ return;
+ }
+ if (height < 0) {
+ SetGLError(GL_INVALID_VALUE, "glTexImageIOSurface2DCHROMIUM", "height < 0");
+ return;
+ }
+ helper_->TexImageIOSurface2DCHROMIUM(
+ target, width, height, ioSurfaceId, plane);
+ CheckGLError();
+}
+
+void GLES2Implementation::CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glCopyTextureCHROMIUM("
+ << GLES2Util::GetStringEnum(target) << ", "
+ << GLES2Util::GetStringEnum(source_id) << ", "
+ << GLES2Util::GetStringEnum(dest_id) << ", " << level
+ << ", " << internalformat << ", "
+ << GLES2Util::GetStringPixelType(dest_type) << ")");
+ helper_->CopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+ CheckGLError();
+}
+
+void GLES2Implementation::BindTexImage2DCHROMIUM(GLenum target, GLint imageId) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBindTexImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << imageId << ")");
+ helper_->BindTexImage2DCHROMIUM(target, imageId);
+ CheckGLError();
+}
+
+void GLES2Implementation::ReleaseTexImage2DCHROMIUM(GLenum target,
+ GLint imageId) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glReleaseTexImage2DCHROMIUM("
+ << GLES2Util::GetStringTextureBindTarget(target) << ", "
+ << imageId << ")");
+ helper_->ReleaseTexImage2DCHROMIUM(target, imageId);
+ CheckGLError();
+}
+
+void GLES2Implementation::DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDiscardFramebufferEXT("
+ << GLES2Util::GetStringEnum(target) << ", " << count
+ << ", " << static_cast<const void*>(attachments) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << attachments[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDiscardFramebufferEXT", "count < 0");
+ return;
+ }
+ helper_->DiscardFramebufferEXTImmediate(target, count, attachments);
+ CheckGLError();
+}
+
+void GLES2Implementation::LoseContextCHROMIUM(GLenum current, GLenum other) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glLoseContextCHROMIUM("
+ << GLES2Util::GetStringResetStatus(current) << ", "
+ << GLES2Util::GetStringResetStatus(other) << ")");
+ helper_->LoseContextCHROMIUM(current, other);
+ CheckGLError();
+}
+
+void GLES2Implementation::WaitSyncPointCHROMIUM(GLuint sync_point) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glWaitSyncPointCHROMIUM("
+ << sync_point << ")");
+ helper_->WaitSyncPointCHROMIUM(sync_point);
+ CheckGLError();
+}
+
+void GLES2Implementation::DrawBuffersEXT(GLsizei count, const GLenum* bufs) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDrawBuffersEXT(" << count << ", "
+ << static_cast<const void*>(bufs) << ")");
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (GLsizei i = 0; i < count; ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << bufs[0 + i * 1]);
+ }
+ });
+ if (count < 0) {
+ SetGLError(GL_INVALID_VALUE, "glDrawBuffersEXT", "count < 0");
+ return;
+ }
+ helper_->DrawBuffersEXTImmediate(count, bufs);
+ CheckGLError();
+}
+
+void GLES2Implementation::DiscardBackbufferCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDiscardBackbufferCHROMIUM("
+ << ")");
+ helper_->DiscardBackbufferCHROMIUM();
+ CheckGLError();
+}
+
+void GLES2Implementation::ScheduleOverlayPlaneCHROMIUM(
+ GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG(
+ "[" << GetLogPrefix() << "] glScheduleOverlayPlaneCHROMIUM("
+ << plane_z_order << ", " << GLES2Util::GetStringEnum(plane_transform)
+ << ", " << overlay_texture_id << ", " << bounds_x << ", " << bounds_y
+ << ", " << bounds_width << ", " << bounds_height << ", " << uv_x
+ << ", " << uv_y << ", " << uv_width << ", " << uv_height << ")");
+ helper_->ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+ CheckGLError();
+}
+
+void GLES2Implementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
+ const GLfloat* m) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadfCHROMIUM("
+ << GLES2Util::GetStringMatrixMode(matrixMode) << ", "
+ << static_cast<const void*>(m) << ")");
+ GPU_CLIENT_LOG("values: " << m[0] << ", " << m[1] << ", " << m[2] << ", "
+ << m[3] << ", " << m[4] << ", " << m[5] << ", "
+ << m[6] << ", " << m[7] << ", " << m[8] << ", "
+ << m[9] << ", " << m[10] << ", " << m[11] << ", "
+ << m[12] << ", " << m[13] << ", " << m[14] << ", "
+ << m[15]);
+ helper_->MatrixLoadfCHROMIUMImmediate(matrixMode, m);
+ CheckGLError();
+}
+
+void GLES2Implementation::MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glMatrixLoadIdentityCHROMIUM("
+ << GLES2Util::GetStringMatrixMode(matrixMode) << ")");
+ helper_->MatrixLoadIdentityCHROMIUM(matrixMode);
+ CheckGLError();
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc
new file mode 100644
index 0000000..80a2e41
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -0,0 +1,3414 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for GLES2Implementation.
+
+#include "gpu/command_buffer/client/gles2_implementation.h"
+
+#include <limits>
+
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "gpu/command_buffer/client/transfer_buffer.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+#if !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+#define GLES2_SUPPORT_CLIENT_SIDE_ARRAYS
+#endif
+
+using testing::_;
+using testing::AtLeast;
+using testing::AnyNumber;
+using testing::DoAll;
+using testing::InSequence;
+using testing::Invoke;
+using testing::Mock;
+using testing::Sequence;
+using testing::StrictMock;
+using testing::Truly;
+using testing::Return;
+
+namespace gpu {
+namespace gles2 {
+
+ACTION_P2(SetMemory, dst, obj) {
+ memcpy(dst, &obj, sizeof(obj));
+}
+
+ACTION_P3(SetMemoryFromArray, dst, array, size) {
+ memcpy(dst, array, size);
+}
+
+// Used to help set the transfer buffer result to a SizedResult of a single
+// value.
+template <typename T>
+class SizedResultHelper {
+ public:
+ explicit SizedResultHelper(T result)
+ : size_(sizeof(result)),
+ result_(result) {
+ }
+
+ private:
+ uint32 size_;
+ T result_;
+};
+
+// Struct to make it easy to pass a vec4 worth of floats.
+struct FourFloats {
+ FourFloats(float _x, float _y, float _z, float _w)
+ : x(_x),
+ y(_y),
+ z(_z),
+ w(_w) {
+ }
+
+ float x;
+ float y;
+ float z;
+ float w;
+};
+
+#pragma pack(push, 1)
+// Struct that holds 7 characters.
+struct Str7 {
+ char str[7];
+};
+#pragma pack(pop)
+
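+// Test double for the transfer buffer. It tracks allocations twice: the
+// "actual" side is what GLES2Implementation really uses at run time, while
+// the "expected" side lets a test predict the shm id/offset a call should
+// produce so the emitted commands can be compared byte for byte (see
+// InSync()).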
+class MockTransferBuffer : public TransferBufferInterface {
+ public:
+ struct ExpectedMemoryInfo {
+ uint32 offset;
+ int32 id;
+ uint8* ptr;
+ };
+
+ MockTransferBuffer(
+ CommandBuffer* command_buffer,
+ unsigned int size,
+ unsigned int result_size,
+ unsigned int alignment)
+ : command_buffer_(command_buffer),
+ size_(size),
+ result_size_(result_size),
+ alignment_(alignment),
+ actual_buffer_index_(0),
+ expected_buffer_index_(0),
+ last_alloc_(NULL),
+ expected_offset_(result_size),
+ actual_offset_(result_size) {
+ // We have to allocate the buffers here because
+ // we need to know their address before GLES2Implementation::Initialize
+ // is called.
+ for (int ii = 0; ii < kNumBuffers; ++ii) {
+ buffers_[ii] = command_buffer_->CreateTransferBuffer(
+ size_ + ii * alignment_,
+ &buffer_ids_[ii]);
+ EXPECT_NE(-1, buffer_ids_[ii]);
+ }
+ }
+
+ virtual ~MockTransferBuffer() { }
+
+ virtual bool Initialize(
+ unsigned int starting_buffer_size,
+ unsigned int result_size,
+ unsigned int /* min_buffer_size */,
+ unsigned int /* max_buffer_size */,
+ unsigned int alignment,
+ unsigned int size_to_flush) OVERRIDE;
+ virtual int GetShmId() OVERRIDE;
+ virtual void* GetResultBuffer() OVERRIDE;
+ virtual int GetResultOffset() OVERRIDE;
+ virtual void Free() OVERRIDE;
+ virtual bool HaveBuffer() const OVERRIDE;
+ virtual void* AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) OVERRIDE;
+ virtual void* Alloc(unsigned int size) OVERRIDE;
+ virtual RingBuffer::Offset GetOffset(void* pointer) const OVERRIDE;
+ virtual void FreePendingToken(void* p, unsigned int /* token */) OVERRIDE;
+
+ size_t MaxTransferBufferSize() {
+ return size_ - result_size_;
+ }
+
+ unsigned int RoundToAlignment(unsigned int size) {
+ return (size + alignment_ - 1) & ~(alignment_ - 1);
+ }
+
+ bool InSync() {
+ return expected_buffer_index_ == actual_buffer_index_ &&
+ expected_offset_ == actual_offset_;
+ }
+
+ ExpectedMemoryInfo GetExpectedMemory(size_t size) {
+ ExpectedMemoryInfo mem;
+ mem.offset = AllocateExpectedTransferBuffer(size);
+ mem.id = GetExpectedTransferBufferId();
+ mem.ptr = static_cast<uint8*>(
+ GetExpectedTransferAddressFromOffset(mem.offset, size));
+ return mem;
+ }
+
+ ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
+ ExpectedMemoryInfo mem;
+ mem.offset = GetExpectedResultBufferOffset();
+ mem.id = GetExpectedResultBufferId();
+ mem.ptr = static_cast<uint8*>(
+ GetExpectedTransferAddressFromOffset(mem.offset, size));
+ return mem;
+ }
+
+ private:
+ static const int kNumBuffers = 2;
+
+ uint8* actual_buffer() const {
+ return static_cast<uint8*>(buffers_[actual_buffer_index_]->memory());
+ }
+
+ uint8* expected_buffer() const {
+ return static_cast<uint8*>(buffers_[expected_buffer_index_]->memory());
+ }
+
+ uint32 AllocateExpectedTransferBuffer(size_t size) {
+ EXPECT_LE(size, MaxTransferBufferSize());
+
+ // Toggle which buffer we get each time to simulate the buffer being
+ // reallocated.
+ expected_buffer_index_ = (expected_buffer_index_ + 1) % kNumBuffers;
+
+ if (expected_offset_ + size > size_) {
+ expected_offset_ = result_size_;
+ }
+ uint32 offset = expected_offset_;
+ expected_offset_ += RoundToAlignment(size);
+
+ // Make sure each buffer has a different offset.
+ return offset + expected_buffer_index_ * alignment_;
+ }
+
+ void* GetExpectedTransferAddressFromOffset(uint32 offset, size_t size) {
+ EXPECT_GE(offset, expected_buffer_index_ * alignment_);
+ EXPECT_LE(offset + size, size_ + expected_buffer_index_ * alignment_);
+ return expected_buffer() + offset;
+ }
+
+ int GetExpectedResultBufferId() {
+ return buffer_ids_[expected_buffer_index_];
+ }
+
+ uint32 GetExpectedResultBufferOffset() {
+ return expected_buffer_index_ * alignment_;
+ }
+
+ int GetExpectedTransferBufferId() {
+ return buffer_ids_[expected_buffer_index_];
+ }
+
+ CommandBuffer* command_buffer_;
+ size_t size_;
+ size_t result_size_;
+ uint32 alignment_;
+ int buffer_ids_[kNumBuffers];
+ scoped_refptr<Buffer> buffers_[kNumBuffers];
+ int actual_buffer_index_;
+ int expected_buffer_index_;
+ void* last_alloc_;
+ uint32 expected_offset_;
+ uint32 actual_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTransferBuffer);
+};
+
+bool MockTransferBuffer::Initialize(
+ unsigned int starting_buffer_size,
+ unsigned int result_size,
+ unsigned int /* min_buffer_size */,
+ unsigned int /* max_buffer_size */,
+ unsigned int alignment,
+ unsigned int /* size_to_flush */) {
+ // Just check they match.
+ return size_ == starting_buffer_size &&
+ result_size_ == result_size &&
+ alignment_ == alignment;
+}
+
+int MockTransferBuffer::GetShmId() {
+ return buffer_ids_[actual_buffer_index_];
+}
+
+void* MockTransferBuffer::GetResultBuffer() {
+ return actual_buffer() + actual_buffer_index_ * alignment_;
+}
+
+int MockTransferBuffer::GetResultOffset() {
+ return actual_buffer_index_ * alignment_;
+}
+
+void MockTransferBuffer::Free() {
+ NOTREACHED();
+}
+
+bool MockTransferBuffer::HaveBuffer() const {
+ return true;
+}
+
+void* MockTransferBuffer::AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) {
+ EXPECT_TRUE(size_allocated != NULL);
+ EXPECT_TRUE(last_alloc_ == NULL);
+
+ // Toggle which buffer we get each time to simulate the buffer being
+ // reallocated.
+ actual_buffer_index_ = (actual_buffer_index_ + 1) % kNumBuffers;
+
+ size = std::min(static_cast<size_t>(size), MaxTransferBufferSize());
+ if (actual_offset_ + size > size_) {
+ actual_offset_ = result_size_;
+ }
+ uint32 offset = actual_offset_;
+ actual_offset_ += RoundToAlignment(size);
+ *size_allocated = size;
+
+ // Make sure each buffer has a different offset.
+ last_alloc_ = actual_buffer() + offset + actual_buffer_index_ * alignment_;
+ return last_alloc_;
+}
+
+void* MockTransferBuffer::Alloc(unsigned int size) {
+ EXPECT_LE(size, MaxTransferBufferSize());
+ unsigned int temp = 0;
+ void* p = AllocUpTo(size, &temp);
+ EXPECT_EQ(temp, size);
+ return p;
+}
+
+RingBuffer::Offset MockTransferBuffer::GetOffset(void* pointer) const {
+ // Make sure each buffer has a different offset.
+ return static_cast<uint8*>(pointer) - actual_buffer();
+}
+
+void MockTransferBuffer::FreePendingToken(void* p, unsigned int /* token */) {
+ EXPECT_EQ(last_alloc_, p);
+ last_alloc_ = NULL;
+}
+
+// API wrapper for Buffers.
+class GenBuffersAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenBuffers(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteBuffers(n, ids);
+ }
+};
+
+// API wrapper for Framebuffers.
+class GenFramebuffersAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenFramebuffers(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteFramebuffers(n, ids);
+ }
+};
+
+// API wrapper for Renderbuffers.
+class GenRenderbuffersAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenRenderbuffers(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteRenderbuffers(n, ids);
+ }
+};
+
+// API wrapper for Textures.
+class GenTexturesAPI {
+ public:
+ static void Gen(GLES2Implementation* gl_impl, GLsizei n, GLuint* ids) {
+ gl_impl->GenTextures(n, ids);
+ }
+
+ static void Delete(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids) {
+ gl_impl->DeleteTextures(n, ids);
+ }
+};
+
+class GLES2ImplementationTest : public testing::Test {
+ protected:
+ static const int kNumTestContexts = 2;
+ static const uint8 kInitialValue = 0xBD;
+ static const int32 kNumCommandEntries = 500;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const size_t kTransferBufferSize = 512;
+
+ static const GLint kMaxCombinedTextureImageUnits = 8;
+ static const GLint kMaxCubeMapTextureSize = 64;
+ static const GLint kMaxFragmentUniformVectors = 16;
+ static const GLint kMaxRenderbufferSize = 64;
+ static const GLint kMaxTextureImageUnits = 8;
+ static const GLint kMaxTextureSize = 128;
+ static const GLint kMaxVaryingVectors = 8;
+ static const GLint kMaxVertexAttribs = 8;
+ static const GLint kMaxVertexTextureImageUnits = 0;
+ static const GLint kMaxVertexUniformVectors = 128;
+ static const GLint kNumCompressedTextureFormats = 0;
+ static const GLint kNumShaderBinaryFormats = 0;
+ static const GLuint kStartId = 1024;
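+  // Buffer ids start past the ids the client reserves for client-side array
+  // emulation (apparently two per context), hence the 2 * kNumTestContexts.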
+ static const GLuint kBuffersStartId =
+ GLES2Implementation::kClientSideArrayId + 2 * kNumTestContexts;
+ static const GLuint kFramebuffersStartId = 1;
+ static const GLuint kProgramsAndShadersStartId = 1;
+ static const GLuint kRenderbuffersStartId = 1;
+ static const GLuint kTexturesStartId = 1;
+ static const GLuint kQueriesStartId = 1;
+ static const GLuint kVertexArraysStartId = 1;
+
+ typedef MockTransferBuffer::ExpectedMemoryInfo ExpectedMemoryInfo;
+
+ class TestContext {
+ public:
+ TestContext() : commands_(NULL), token_(0) {}
+
+ bool Initialize(ShareGroup* share_group,
+ bool bind_generates_resource_client,
+ bool bind_generates_resource_service,
+ bool lose_context_when_out_of_memory) {
+ command_buffer_.reset(new StrictMock<MockClientCommandBuffer>());
+ if (!command_buffer_->Initialize())
+ return false;
+
+ transfer_buffer_.reset(
+ new MockTransferBuffer(command_buffer_.get(),
+ kTransferBufferSize,
+ GLES2Implementation::kStartingOffset,
+ GLES2Implementation::kAlignment));
+
+ helper_.reset(new GLES2CmdHelper(command_buffer()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+
+ gpu_control_.reset(new StrictMock<MockClientGpuControl>());
+ EXPECT_CALL(*gpu_control_, GetCapabilities())
+ .WillOnce(testing::Return(Capabilities()));
+
+ GLES2Implementation::GLStaticState state;
+ GLES2Implementation::GLStaticState::IntState& int_state = state.int_state;
+ int_state.max_combined_texture_image_units =
+ kMaxCombinedTextureImageUnits;
+ int_state.max_cube_map_texture_size = kMaxCubeMapTextureSize;
+ int_state.max_fragment_uniform_vectors = kMaxFragmentUniformVectors;
+ int_state.max_renderbuffer_size = kMaxRenderbufferSize;
+ int_state.max_texture_image_units = kMaxTextureImageUnits;
+ int_state.max_texture_size = kMaxTextureSize;
+ int_state.max_varying_vectors = kMaxVaryingVectors;
+ int_state.max_vertex_attribs = kMaxVertexAttribs;
+ int_state.max_vertex_texture_image_units = kMaxVertexTextureImageUnits;
+ int_state.max_vertex_uniform_vectors = kMaxVertexUniformVectors;
+ int_state.num_compressed_texture_formats = kNumCompressedTextureFormats;
+ int_state.num_shader_binary_formats = kNumShaderBinaryFormats;
+ int_state.bind_generates_resource_chromium =
+ bind_generates_resource_service ? 1 : 0;
+
+ // This just happens to work for now because IntState has 1 GLint per
+ // state.
+ // If IntState gets more complicated this code will need to get more
+ // complicated.
+ ExpectedMemoryInfo mem1 = transfer_buffer_->GetExpectedMemory(
+ sizeof(GLES2Implementation::GLStaticState::IntState) * 2 +
+ sizeof(cmds::GetShaderPrecisionFormat::Result) * 12);
+
+ {
+ InSequence sequence;
+
+ EXPECT_CALL(*command_buffer_, OnFlush())
+ .WillOnce(SetMemory(mem1.ptr + sizeof(int_state), int_state))
+ .RetiresOnSaturation();
+ GetNextToken(); // eat the token that starting up will use.
+
+ gl_.reset(new GLES2Implementation(helper_.get(),
+ share_group,
+ transfer_buffer_.get(),
+ bind_generates_resource_client,
+ lose_context_when_out_of_memory,
+ gpu_control_.get()));
+
+ if (!gl_->Initialize(kTransferBufferSize,
+ kTransferBufferSize,
+ kTransferBufferSize,
+ GLES2Implementation::kNoLimit))
+ return false;
+ }
+
+ EXPECT_CALL(*command_buffer_, OnFlush()).Times(1).RetiresOnSaturation();
+ helper_->CommandBufferHelper::Finish();
+ ::testing::Mock::VerifyAndClearExpectations(gl_.get());
+
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ commands_ = static_cast<CommandBufferEntry*>(ring_buffer->memory()) +
+ command_buffer()->GetLastState().put_offset;
+ ClearCommands();
+ EXPECT_TRUE(transfer_buffer_->InSync());
+
+ ::testing::Mock::VerifyAndClearExpectations(command_buffer());
+ return true;
+ }
+
+ void TearDown() {
+ Mock::VerifyAndClear(gl_.get());
+ EXPECT_CALL(*command_buffer(), OnFlush()).Times(AnyNumber());
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(AtLeast(1));
+ gl_.reset();
+ }
+
+ MockClientCommandBuffer* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ int GetNextToken() { return ++token_; }
+
+ void ClearCommands() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
+ }
+
+ scoped_ptr<MockClientCommandBuffer> command_buffer_;
+ scoped_ptr<MockClientGpuControl> gpu_control_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MockTransferBuffer> transfer_buffer_;
+ scoped_ptr<GLES2Implementation> gl_;
+ CommandBufferEntry* commands_;
+ int token_;
+ };
+
+ GLES2ImplementationTest() : commands_(NULL) {}
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ bool NoCommandsWritten() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ const uint8* cmds = reinterpret_cast<const uint8*>(ring_buffer->memory());
+ const uint8* end = cmds + ring_buffer->size();
+ for (; cmds < end; ++cmds) {
+ if (*cmds != kInitialValue) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ QueryTracker::Query* GetQuery(GLuint id) {
+ return gl_->query_tracker_->GetQuery(id);
+ }
+
+ struct ContextInitOptions {
+ ContextInitOptions()
+ : bind_generates_resource_client(true),
+ bind_generates_resource_service(true),
+ lose_context_when_out_of_memory(false) {}
+
+ bool bind_generates_resource_client;
+ bool bind_generates_resource_service;
+ bool lose_context_when_out_of_memory;
+ };
+
+ bool Initialize(const ContextInitOptions& init_options) {
+ bool success = true;
+ share_group_ = new ShareGroup(init_options.bind_generates_resource_client);
+
+ for (int i = 0; i < kNumTestContexts; i++) {
+ if (!test_contexts_[i].Initialize(
+ share_group_.get(),
+ init_options.bind_generates_resource_client,
+ init_options.bind_generates_resource_service,
+ init_options.lose_context_when_out_of_memory))
+ success = false;
+ }
+
+ // Default to test context 0.
+ gpu_control_ = test_contexts_[0].gpu_control_.get();
+ helper_ = test_contexts_[0].helper_.get();
+ transfer_buffer_ = test_contexts_[0].transfer_buffer_.get();
+ gl_ = test_contexts_[0].gl_.get();
+ commands_ = test_contexts_[0].commands_;
+ return success;
+ }
+
+ MockClientCommandBuffer* command_buffer() const {
+ return test_contexts_[0].command_buffer_.get();
+ }
+
+ int GetNextToken() { return test_contexts_[0].GetNextToken(); }
+
+ const void* GetPut() {
+ return helper_->GetSpace(0);
+ }
+
+ void ClearCommands() {
+ scoped_refptr<Buffer> ring_buffer = helper_->get_ring_buffer();
+ memset(ring_buffer->memory(), kInitialValue, ring_buffer->size());
+ }
+
+ size_t MaxTransferBufferSize() {
+ return transfer_buffer_->MaxTransferBufferSize();
+ }
+
+ ExpectedMemoryInfo GetExpectedMemory(size_t size) {
+ return transfer_buffer_->GetExpectedMemory(size);
+ }
+
+ ExpectedMemoryInfo GetExpectedResultMemory(size_t size) {
+ return transfer_buffer_->GetExpectedResultMemory(size);
+ }
+
+ // Sets the ProgramInfoManager. The manager will be owned
+ // by the ShareGroup.
+ void SetProgramInfoManager(ProgramInfoManager* manager) {
+ gl_->share_group()->set_program_info_manager(manager);
+ }
+
+ int CheckError() {
+ ExpectedMemoryInfo result =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ return gl_->GetError();
+ }
+
+ const std::string& GetLastError() {
+ return gl_->GetLastError();
+ }
+
+ bool GetBucketContents(uint32 bucket_id, std::vector<int8>* data) {
+ return gl_->GetBucketContents(bucket_id, data);
+ }
+
+ TestContext test_contexts_[kNumTestContexts];
+
+ scoped_refptr<ShareGroup> share_group_;
+ MockClientGpuControl* gpu_control_;
+ GLES2CmdHelper* helper_;
+ MockTransferBuffer* transfer_buffer_;
+ GLES2Implementation* gl_;
+ CommandBufferEntry* commands_;
+};
+
+void GLES2ImplementationTest::SetUp() {
+ ContextInitOptions init_options;
+ ASSERT_TRUE(Initialize(init_options));
+}
+
+void GLES2ImplementationTest::TearDown() {
+ for (int i = 0; i < kNumTestContexts; i++)
+ test_contexts_[i].TearDown();
+}
+
+class GLES2ImplementationManualInitTest : public GLES2ImplementationTest {
+ protected:
+ virtual void SetUp() OVERRIDE {}
+};
+
+class GLES2ImplementationStrictSharedTest : public GLES2ImplementationTest {
+ protected:
+ virtual void SetUp() OVERRIDE;
+
+ template <class ResApi>
+ void FlushGenerationTest() {
+ GLuint id1, id2, id3;
+
+ // Generate valid id.
+ ResApi::Gen(gl_, 1, &id1);
+ EXPECT_NE(id1, 0u);
+
+ // Delete id1 and generate id2. id1 should not be reused.
+ ResApi::Delete(gl_, 1, &id1);
+ ResApi::Gen(gl_, 1, &id2);
+ EXPECT_NE(id2, 0u);
+ EXPECT_NE(id2, id1);
+
+ // Expect id1 reuse after Flush.
+ gl_->Flush();
+ ResApi::Gen(gl_, 1, &id3);
+ EXPECT_EQ(id3, id1);
+ }
+
+ // Ids should not be reused unless the |Deleting| context does a Flush()
+ // AND triggers a lazy release after that.
+ template <class ResApi>
+ void CrossContextGenerationTest() {
+ GLES2Implementation* gl1 = test_contexts_[0].gl_.get();
+ GLES2Implementation* gl2 = test_contexts_[1].gl_.get();
+ GLuint id1, id2, id3;
+
+ // Delete, no flush on context 1. No reuse.
+ ResApi::Gen(gl1, 1, &id1);
+ ResApi::Delete(gl1, 1, &id1);
+ ResApi::Gen(gl1, 1, &id2);
+ EXPECT_NE(id1, id2);
+
+ // Flush context 2. Still no reuse.
+ gl2->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+ EXPECT_NE(id2, id3);
+
+ // Flush on context 1, but no lazy release. Still no reuse.
+ gl1->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+
+ // Lazy release triggered by another Delete. Should reuse id1.
+ ResApi::Delete(gl1, 1, &id2);
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_EQ(id1, id3);
+ }
+
+ // Same as CrossContextGenerationTest(), but triggers an Auto Flush on
+ // the Delete(). Tests an edge case regression.
+ template <class ResApi>
+ void CrossContextGenerationAutoFlushTest() {
+ GLES2Implementation* gl1 = test_contexts_[0].gl_.get();
+ GLES2Implementation* gl2 = test_contexts_[1].gl_.get();
+ GLuint id1, id2, id3;
+
+ // Delete, no flush on context 1. No reuse.
+ // By half filling the buffer, an internal flush is forced on the Delete().
+ ResApi::Gen(gl1, 1, &id1);
+ gl1->helper()->Noop(kNumCommandEntries / 2);
+ ResApi::Delete(gl1, 1, &id1);
+ ResApi::Gen(gl1, 1, &id2);
+ EXPECT_NE(id1, id2);
+
+ // Flush context 2. Still no reuse.
+ gl2->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+ EXPECT_NE(id2, id3);
+
+ // Flush on context 1, but no lazy release. Still no reuse.
+ gl1->Flush();
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_NE(id1, id3);
+
+ // Lazy release triggered by another Delete. Should reuse id1.
+ ResApi::Delete(gl1, 1, &id2);
+ ResApi::Gen(gl2, 1, &id3);
+ EXPECT_EQ(id1, id3);
+ }
+};
+
+void GLES2ImplementationStrictSharedTest::SetUp() {
+ ContextInitOptions init_options;
+ init_options.bind_generates_resource_client = false;
+ init_options.bind_generates_resource_service = false;
+ ASSERT_TRUE(Initialize(init_options));
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const uint8 GLES2ImplementationTest::kInitialValue;
+const int32 GLES2ImplementationTest::kNumCommandEntries;
+const int32 GLES2ImplementationTest::kCommandBufferSizeBytes;
+const size_t GLES2ImplementationTest::kTransferBufferSize;
+const GLint GLES2ImplementationTest::kMaxCombinedTextureImageUnits;
+const GLint GLES2ImplementationTest::kMaxCubeMapTextureSize;
+const GLint GLES2ImplementationTest::kMaxFragmentUniformVectors;
+const GLint GLES2ImplementationTest::kMaxRenderbufferSize;
+const GLint GLES2ImplementationTest::kMaxTextureImageUnits;
+const GLint GLES2ImplementationTest::kMaxTextureSize;
+const GLint GLES2ImplementationTest::kMaxVaryingVectors;
+const GLint GLES2ImplementationTest::kMaxVertexAttribs;
+const GLint GLES2ImplementationTest::kMaxVertexTextureImageUnits;
+const GLint GLES2ImplementationTest::kMaxVertexUniformVectors;
+const GLint GLES2ImplementationTest::kNumCompressedTextureFormats;
+const GLint GLES2ImplementationTest::kNumShaderBinaryFormats;
+const GLuint GLES2ImplementationTest::kStartId;
+const GLuint GLES2ImplementationTest::kBuffersStartId;
+const GLuint GLES2ImplementationTest::kFramebuffersStartId;
+const GLuint GLES2ImplementationTest::kProgramsAndShadersStartId;
+const GLuint GLES2ImplementationTest::kRenderbuffersStartId;
+const GLuint GLES2ImplementationTest::kTexturesStartId;
+const GLuint GLES2ImplementationTest::kQueriesStartId;
+const GLuint GLES2ImplementationTest::kVertexArraysStartId;
+#endif
+
+TEST_F(GLES2ImplementationTest, Basic) {
+ EXPECT_TRUE(gl_->share_group() != NULL);
+}
+
+TEST_F(GLES2ImplementationTest, GetBucketContents) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const uint32 kTestSize = MaxTransferBufferSize() + 32;
+
+ scoped_ptr<uint8[]> buf(new uint8[kTestSize]);
+ uint8* expected_data = buf.get();
+ for (uint32 ii = 0; ii < kTestSize; ++ii) {
+ expected_data[ii] = ii * 3;
+ }
+
+ struct Cmds {
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::GetBucketData get_bucket_data;
+ cmd::SetToken set_token2;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(sizeof(uint32));
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(
+ kTestSize - MaxTransferBufferSize());
+
+ Cmds expected;
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.get_bucket_data.Init(
+ kBucketId, MaxTransferBufferSize(),
+ kTestSize - MaxTransferBufferSize(), mem2.id, mem2.offset);
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ expected.set_token2.Init(GetNextToken());
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(
+ SetMemory(result1.ptr, kTestSize),
+ SetMemoryFromArray(
+ mem1.ptr, expected_data, MaxTransferBufferSize())))
+ .WillOnce(SetMemoryFromArray(
+ mem2.ptr, expected_data + MaxTransferBufferSize(),
+ kTestSize - MaxTransferBufferSize()))
+ .RetiresOnSaturation();
+
+ std::vector<int8> data;
+ GetBucketContents(kBucketId, &data);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ASSERT_EQ(kTestSize, data.size());
+ EXPECT_EQ(0, memcmp(expected_data, &data[0], data.size()));
+}
+
+TEST_F(GLES2ImplementationTest, GetShaderPrecisionFormat) {
+ struct Cmds {
+ cmds::GetShaderPrecisionFormat cmd;
+ };
+ typedef cmds::GetShaderPrecisionFormat::Result Result;
+
+ // The first call for mediump should trigger a command buffer request.
+ GLint range1[2] = {0, 0};
+ GLint precision1 = 0;
+ Cmds expected1;
+ ExpectedMemoryInfo client_result1 = GetExpectedResultMemory(4);
+ expected1.cmd.Init(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT,
+ client_result1.id, client_result1.offset);
+ Result server_result1 = {true, 14, 14, 10};
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(client_result1.ptr, server_result1))
+ .RetiresOnSaturation();
+ gl_->GetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT,
+ range1, &precision1);
+ const void* commands2 = GetPut();
+ EXPECT_NE(commands_, commands2);
+ EXPECT_EQ(0, memcmp(&expected1, commands_, sizeof(expected1)));
+ EXPECT_EQ(range1[0], 14);
+ EXPECT_EQ(range1[1], 14);
+ EXPECT_EQ(precision1, 10);
+
+ // The second call for mediump should use the cached value and avoid
+ // triggering a command buffer request, so we do not expect a call to
+ // OnFlush() here. We do expect the results to be correct though.
+ GLint range2[2] = {0, 0};
+ GLint precision2 = 0;
+ gl_->GetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_MEDIUM_FLOAT,
+ range2, &precision2);
+ const void* commands3 = GetPut();
+ EXPECT_EQ(commands2, commands3);
+ EXPECT_EQ(range2[0], 14);
+ EXPECT_EQ(range2[1], 14);
+ EXPECT_EQ(precision2, 10);
+
+ // If we then make a request for highp, we should get another command
+ // buffer request since it hasn't been cached yet.
+ GLint range3[2] = {0, 0};
+ GLint precision3 = 0;
+ Cmds expected3;
+ ExpectedMemoryInfo result3 = GetExpectedResultMemory(4);
+ expected3.cmd.Init(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT,
+ result3.id, result3.offset);
+ Result result3_source = {true, 62, 62, 16};
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result3.ptr, result3_source))
+ .RetiresOnSaturation();
+ gl_->GetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT,
+ range3, &precision3);
+ const void* commands4 = GetPut();
+ EXPECT_NE(commands3, commands4);
+ EXPECT_EQ(0, memcmp(&expected3, commands3, sizeof(expected3)));
+ EXPECT_EQ(range3[0], 62);
+ EXPECT_EQ(range3[1], 62);
+ EXPECT_EQ(precision3, 16);
+}
+
+TEST_F(GLES2ImplementationTest, ShaderSource) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kShaderId = 456;
+ const char* kString1 = "foobar";
+ const char* kString2 = "barfoo";
+ const size_t kString1Size = strlen(kString1);
+ const size_t kString2Size = strlen(kString2);
+ const size_t kString3Size = 1; // Want the terminating NULL.
+ const size_t kSourceSize = kString1Size + kString2Size + kString3Size;
+ const size_t kPaddedString1Size =
+ transfer_buffer_->RoundToAlignment(kString1Size);
+ const size_t kPaddedString2Size =
+ transfer_buffer_->RoundToAlignment(kString2Size);
+ const size_t kPaddedString3Size =
+ transfer_buffer_->RoundToAlignment(kString3Size);
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size;
+ cmd::SetBucketData set_bucket_data1;
+ cmd::SetToken set_token1;
+ cmd::SetBucketData set_bucket_data2;
+ cmd::SetToken set_token2;
+ cmd::SetBucketData set_bucket_data3;
+ cmd::SetToken set_token3;
+ cmds::ShaderSourceBucket shader_source_bucket;
+ cmd::SetBucketSize clear_bucket_size;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kPaddedString1Size);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kPaddedString2Size);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kPaddedString3Size);
+
+ Cmds expected;
+ expected.set_bucket_size.Init(kBucketId, kSourceSize);
+ expected.set_bucket_data1.Init(
+ kBucketId, 0, kString1Size, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_data2.Init(
+ kBucketId, kString1Size, kString2Size, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_bucket_data3.Init(
+ kBucketId, kString1Size + kString2Size,
+ kString3Size, mem3.id, mem3.offset);
+ expected.set_token3.Init(GetNextToken());
+ expected.shader_source_bucket.Init(kShaderId, kBucketId);
+ expected.clear_bucket_size.Init(kBucketId, 0);
+ const char* strings[] = {
+ kString1,
+ kString2,
+ };
+ gl_->ShaderSource(kShaderId, 2, strings, NULL);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GetShaderSource) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kShaderId = 456;
+ const Str7 kString = {"foobar"};
+ const char kBad = 0x12;
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetShaderSource get_shader_source;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(sizeof(uint32));
+
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_shader_source.Init(kShaderId, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ char buf[sizeof(kString) + 1];
+ memset(buf, kBad, sizeof(buf));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .RetiresOnSaturation();
+
+ GLsizei length = 0;
+ gl_->GetShaderSource(kShaderId, sizeof(buf), &length, buf);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(sizeof(kString) - 1, static_cast<size_t>(length));
+ EXPECT_STREQ(kString.str, buf);
+ EXPECT_EQ(buf[sizeof(kString)], kBad);
+}
+
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
+TEST_F(GLES2ImplementationTest, DrawArraysClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawArrays draw;
+ cmds::BindBuffer restore;
+ };
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLint kFirst = 1;
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kFirst, kCount);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawArrays(GL_POINTS, kFirst, kCount);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawArraysInstancedANGLEClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::VertexAttribDivisorANGLE divisor;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawArraysInstancedANGLE draw;
+ cmds::BindBuffer restore;
+ };
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLint kFirst = 1;
+ const GLsizei kCount = 2;
+ const GLuint kDivisor = 1;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ 1 * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.divisor.Init(kAttribIndex2, kDivisor);
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kFirst, kCount, 1);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(
+ kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribDivisorANGLE(kAttribIndex2, kDivisor);
+ gl_->DrawArraysInstancedANGLE(GL_POINTS, kFirst, kCount, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint16 indices[] = {
+ 1, 2,
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_index_emu;
+ cmds::BufferData set_index_size;
+ cmds::BufferSubData copy_data0;
+ cmd::SetToken set_token0;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElements draw;
+ cmds::BindBuffer restore;
+ cmds::BindBuffer restore_element;
+ };
+ const GLsizei kIndexSize = sizeof(indices);
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kEmuIndexBufferId =
+ GLES2Implementation::kClientSideElementArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kIndexSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_index_emu.Init(GL_ELEMENT_ARRAY_BUFFER, kEmuIndexBufferId);
+ expected.set_index_size.Init(
+ GL_ELEMENT_ARRAY_BUFFER, kIndexSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data0.Init(
+ GL_ELEMENT_ARRAY_BUFFER, 0, kIndexSize, mem1.id, mem1.offset);
+ expected.set_token0.Init(GetNextToken());
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_SHORT, 0);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ expected.restore_element.Init(GL_ELEMENT_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_SHORT, indices);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffersIndexUint) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint32 indices[] = {
+ 1, 2,
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_index_emu;
+ cmds::BufferData set_index_size;
+ cmds::BufferSubData copy_data0;
+ cmd::SetToken set_token0;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElements draw;
+ cmds::BindBuffer restore;
+ cmds::BindBuffer restore_element;
+ };
+ const GLsizei kIndexSize = sizeof(indices);
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kEmuIndexBufferId =
+ GLES2Implementation::kClientSideElementArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kIndexSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_index_emu.Init(GL_ELEMENT_ARRAY_BUFFER, kEmuIndexBufferId);
+ expected.set_index_size.Init(
+ GL_ELEMENT_ARRAY_BUFFER, kIndexSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data0.Init(
+ GL_ELEMENT_ARRAY_BUFFER, 0, kIndexSize, mem1.id, mem1.offset);
+ expected.set_token0.Init(GetNextToken());
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_INT, 0);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ expected.restore_element.Init(GL_ELEMENT_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_INT, indices);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsClientSideBuffersInvalidIndexUint) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint32 indices[] = {
+ 1, 0x90000000
+ };
+
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .Times(1)
+ .RetiresOnSaturation();
+
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_INT, indices);
+
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest,
+ DrawElementsClientSideBuffersServiceSideIndices) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::BindBuffer bind_to_index;
+ cmds::GetMaxValueInBufferCHROMIUM get_max;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElements draw;
+ cmds::BindBuffer restore;
+ };
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kClientIndexBufferId = 0x789;
+ const GLuint kIndexOffset = 0x40;
+ const GLuint kMaxIndex = 2;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ arraysize(verts) * kNumComponents2 * sizeof(verts[0][0]);
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedResultMemory(sizeof(uint32));
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.bind_to_index.Init(GL_ELEMENT_ARRAY_BUFFER, kClientIndexBufferId);
+ expected.get_max.Init(kClientIndexBufferId, kCount, GL_UNSIGNED_SHORT,
+ kIndexOffset, mem1.id, mem1.offset);
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_SHORT, kIndexOffset);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(mem1.ptr, kMaxIndex))
+ .RetiresOnSaturation();
+
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, kClientIndexBufferId);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->DrawElements(GL_POINTS, kCount, GL_UNSIGNED_SHORT,
+ reinterpret_cast<const void*>(kIndexOffset));
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawElementsInstancedANGLEClientSideBuffers) {
+ static const float verts[][4] = {
+ { 12.0f, 23.0f, 34.0f, 45.0f, },
+ { 56.0f, 67.0f, 78.0f, 89.0f, },
+ { 13.0f, 24.0f, 35.0f, 46.0f, },
+ };
+ static const uint16 indices[] = {
+ 1, 2,
+ };
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable1;
+ cmds::EnableVertexAttribArray enable2;
+ cmds::VertexAttribDivisorANGLE divisor;
+ cmds::BindBuffer bind_to_index_emu;
+ cmds::BufferData set_index_size;
+ cmds::BufferSubData copy_data0;
+ cmd::SetToken set_token0;
+ cmds::BindBuffer bind_to_emu;
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::VertexAttribPointer set_pointer1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ cmds::VertexAttribPointer set_pointer2;
+ cmds::DrawElementsInstancedANGLE draw;
+ cmds::BindBuffer restore;
+ cmds::BindBuffer restore_element;
+ };
+ const GLsizei kIndexSize = sizeof(indices);
+ const GLuint kEmuBufferId = GLES2Implementation::kClientSideArrayId;
+ const GLuint kEmuIndexBufferId =
+ GLES2Implementation::kClientSideElementArrayId;
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kClientStride = sizeof(verts[0]);
+ const GLsizei kCount = 2;
+ const GLsizei kSize1 =
+ arraysize(verts) * kNumComponents1 * sizeof(verts[0][0]);
+ const GLsizei kSize2 =
+ 1 * kNumComponents2 * sizeof(verts[0][0]);
+ const GLuint kDivisor = 1;
+ const GLsizei kEmuOffset1 = 0;
+ const GLsizei kEmuOffset2 = kSize1;
+ const GLsizei kTotalSize = kSize1 + kSize2;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kIndexSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kSize1);
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(kSize2);
+
+ Cmds expected;
+ expected.enable1.Init(kAttribIndex1);
+ expected.enable2.Init(kAttribIndex2);
+ expected.divisor.Init(kAttribIndex2, kDivisor);
+ expected.bind_to_index_emu.Init(GL_ELEMENT_ARRAY_BUFFER, kEmuIndexBufferId);
+ expected.set_index_size.Init(
+ GL_ELEMENT_ARRAY_BUFFER, kIndexSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data0.Init(
+ GL_ELEMENT_ARRAY_BUFFER, 0, kIndexSize, mem1.id, mem1.offset);
+ expected.set_token0.Init(GetNextToken());
+ expected.bind_to_emu.Init(GL_ARRAY_BUFFER, kEmuBufferId);
+ expected.set_size.Init(GL_ARRAY_BUFFER, kTotalSize, 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, kEmuOffset1, kSize1, mem2.id, mem2.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_pointer1.Init(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, 0, kEmuOffset1);
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kEmuOffset2, kSize2, mem3.id, mem3.offset);
+ expected.set_token2.Init(GetNextToken());
+ expected.set_pointer2.Init(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, 0, kEmuOffset2);
+ expected.draw.Init(GL_POINTS, kCount, GL_UNSIGNED_SHORT, 0, 1);
+ expected.restore.Init(GL_ARRAY_BUFFER, 0);
+ expected.restore_element.Init(GL_ELEMENT_ARRAY_BUFFER, 0);
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ gl_->EnableVertexAttribArray(kAttribIndex2);
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kClientStride, verts);
+ gl_->VertexAttribDivisorANGLE(kAttribIndex2, kDivisor);
+ gl_->DrawElementsInstancedANGLE(
+ GL_POINTS, kCount, GL_UNSIGNED_SHORT, indices, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GetVertexBufferPointerv) {
+ static const float verts[1] = { 0.0f, };
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kStride1 = 12;
+ const GLsizei kStride2 = 0;
+ const GLuint kBufferId = 0x123;
+ const GLint kOffset2 = 0x456;
+
+ // It's all cached on the client side so no get commands are issued.
+ struct Cmds {
+ cmds::BindBuffer bind;
+ cmds::VertexAttribPointer set_pointer;
+ };
+
+ Cmds expected;
+ expected.bind.Init(GL_ARRAY_BUFFER, kBufferId);
+ expected.set_pointer.Init(kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE,
+ kStride2, kOffset2);
+
+ // Set one client side buffer.
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kStride1, verts);
+ // Set one VBO
+ gl_->BindBuffer(GL_ARRAY_BUFFER, kBufferId);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kStride2,
+ reinterpret_cast<const void*>(kOffset2));
+ // now get them both.
+ void* ptr1 = NULL;
+ void* ptr2 = NULL;
+
+ gl_->GetVertexAttribPointerv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr1);
+ gl_->GetVertexAttribPointerv(
+ kAttribIndex2, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr2);
+
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(static_cast<const void*>(&verts) == ptr1);
+ EXPECT_TRUE(ptr2 == reinterpret_cast<void*>(kOffset2));
+}
+
+TEST_F(GLES2ImplementationTest, GetVertexAttrib) {
+ static const float verts[1] = { 0.0f, };
+ const GLuint kAttribIndex1 = 1;
+ const GLuint kAttribIndex2 = 3;
+ const GLint kNumComponents1 = 3;
+ const GLint kNumComponents2 = 2;
+ const GLsizei kStride1 = 12;
+ const GLsizei kStride2 = 0;
+ const GLuint kBufferId = 0x123;
+ const GLint kOffset2 = 0x456;
+
+ // Only one set and one get because the client side buffer's info is stored
+ // on the client side.
+ struct Cmds {
+ cmds::EnableVertexAttribArray enable;
+ cmds::BindBuffer bind;
+ cmds::VertexAttribPointer set_pointer;
+ cmds::GetVertexAttribfv get2; // for getting the value from attrib1
+ };
+
+ ExpectedMemoryInfo mem2 = GetExpectedResultMemory(16);
+
+ Cmds expected;
+ expected.enable.Init(kAttribIndex1);
+ expected.bind.Init(GL_ARRAY_BUFFER, kBufferId);
+ expected.set_pointer.Init(kAttribIndex2, kNumComponents2, GL_FLOAT, GL_FALSE,
+ kStride2, kOffset2);
+ expected.get2.Init(kAttribIndex1,
+ GL_CURRENT_VERTEX_ATTRIB,
+ mem2.id, mem2.offset);
+
+ FourFloats current_attrib(1.2f, 3.4f, 5.6f, 7.8f);
+
+ // One flush to wait for the final GetVertexAttribfv call; the other
+ // queries are answered from the client-side cache.
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(
+ mem2.ptr, SizedResultHelper<FourFloats>(current_attrib)))
+ .RetiresOnSaturation();
+
+ gl_->EnableVertexAttribArray(kAttribIndex1);
+ // Set one client side buffer.
+ gl_->VertexAttribPointer(kAttribIndex1, kNumComponents1,
+ GL_FLOAT, GL_FALSE, kStride1, verts);
+ // Set one VBO
+ gl_->BindBuffer(GL_ARRAY_BUFFER, kBufferId);
+ gl_->VertexAttribPointer(kAttribIndex2, kNumComponents2,
+ GL_FLOAT, GL_FALSE, kStride2,
+ reinterpret_cast<const void*>(kOffset2));
+ // first get the service side once to see that we make a command
+ GLint buffer_id = 0;
+ GLint enabled = 0;
+ GLint size = 0;
+ GLint stride = 0;
+ GLint type = 0;
+ GLint normalized = 1;
+ float current[4] = { 0.0f, };
+
+ gl_->GetVertexAttribiv(
+ kAttribIndex2, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &buffer_id);
+ EXPECT_EQ(kBufferId, static_cast<GLuint>(buffer_id));
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &buffer_id);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &enabled);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_SIZE, &size);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_STRIDE, &stride);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_TYPE, &type);
+ gl_->GetVertexAttribiv(
+ kAttribIndex1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &normalized);
+ gl_->GetVertexAttribfv(
+ kAttribIndex1, GL_CURRENT_VERTEX_ATTRIB, &current[0]);
+
+ EXPECT_EQ(0, buffer_id);
+ EXPECT_EQ(GL_TRUE, enabled);
+ EXPECT_EQ(kNumComponents1, size);
+ EXPECT_EQ(kStride1, stride);
+ EXPECT_EQ(GL_FLOAT, type);
+ EXPECT_EQ(GL_FALSE, normalized);
+ EXPECT_EQ(0, memcmp(&current_attrib, &current, sizeof(current_attrib)));
+
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReservedIds) {
+ // Only the get error command should be issued.
+ struct Cmds {
+ cmds::GetError get;
+ };
+ Cmds expected;
+
+ ExpectedMemoryInfo mem1 = GetExpectedResultMemory(
+ sizeof(cmds::GetError::Result));
+
+ expected.get.Init(mem1.id, mem1.offset);
+
+ // One call to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(mem1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ gl_->BindBuffer(
+ GL_ARRAY_BUFFER,
+ GLES2Implementation::kClientSideArrayId);
+ gl_->BindBuffer(
+ GL_ARRAY_BUFFER,
+ GLES2Implementation::kClientSideElementArrayId);
+ GLenum err = gl_->GetError();
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), err);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
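+// A ReadPixels whose data does not fit in the transfer buffer should be
+// split into two ReadPixels commands, each reading half of the rows.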
+TEST_F(GLES2ImplementationTest, ReadPixels2Reads) {
+ struct Cmds {
+ cmds::ReadPixels read1;
+ cmd::SetToken set_token1;
+ cmds::ReadPixels read2;
+ cmd::SetToken set_token2;
+ };
+ const GLint kBytesPerPixel = 4;
+ const GLint kWidth =
+ (kTransferBufferSize - GLES2Implementation::kStartingOffset) /
+ kBytesPerPixel;
+ const GLint kHeight = 2;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+
+ ExpectedMemoryInfo mem1 =
+ GetExpectedMemory(kWidth * kHeight / 2 * kBytesPerPixel);
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::ReadPixels::Result));
+ ExpectedMemoryInfo mem2 =
+ GetExpectedMemory(kWidth * kHeight / 2 * kBytesPerPixel);
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::ReadPixels::Result));
+
+ Cmds expected;
+ expected.read1.Init(
+ 0, 0, kWidth, kHeight / 2, kFormat, kType,
+ mem1.id, mem1.offset, result1.id, result1.offset,
+ false);
+ expected.set_token1.Init(GetNextToken());
+ expected.read2.Init(
+ 0, kHeight / 2, kWidth, kHeight / 2, kFormat, kType,
+ mem2.id, mem2.offset, result2.id, result2.offset, false);
+ expected.set_token2.Init(GetNextToken());
+ scoped_ptr<int8[]> buffer(new int8[kWidth * kHeight * kBytesPerPixel]);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, static_cast<uint32>(1)))
+ .WillOnce(SetMemory(result2.ptr, static_cast<uint32>(1)))
+ .RetiresOnSaturation();
+
+ gl_->ReadPixels(0, 0, kWidth, kHeight, kFormat, kType, buffer.get());
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReadPixelsBadFormatType) {
+ struct Cmds {
+ cmds::ReadPixels read;
+ cmd::SetToken set_token;
+ };
+ const GLint kBytesPerPixel = 4;
+ const GLint kWidth = 2;
+ const GLint kHeight = 2;
+ const GLenum kFormat = 0;
+ const GLenum kType = 0;
+
+ ExpectedMemoryInfo mem1 =
+ GetExpectedMemory(kWidth * kHeight * kBytesPerPixel);
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::ReadPixels::Result));
+
+ Cmds expected;
+ expected.read.Init(
+ 0, 0, kWidth, kHeight, kFormat, kType,
+ mem1.id, mem1.offset, result1.id, result1.offset, false);
+ expected.set_token.Init(GetNextToken());
+ scoped_ptr<int8[]> buffer(new int8[kWidth * kHeight * kBytesPerPixel]);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .Times(1)
+ .RetiresOnSaturation();
+
+ gl_->ReadPixels(0, 0, kWidth, kHeight, kFormat, kType, buffer.get());
+}
+
+TEST_F(GLES2ImplementationTest, FreeUnusedSharedMemory) {
+ struct Cmds {
+ cmds::BufferSubData buf;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 15;
+ const GLsizeiptr kSize = 16;
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kSize);
+
+ Cmds expected;
+ expected.buf.Init(
+ kTarget, kOffset, kSize, mem1.id, mem1.offset);
+ expected.set_token.Init(GetNextToken());
+
+ void* mem = gl_->MapBufferSubDataCHROMIUM(
+ kTarget, kOffset, kSize, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem != NULL);
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ gl_->FreeUnusedSharedMemory();
+}
+
+TEST_F(GLES2ImplementationTest, MapUnmapBufferSubDataCHROMIUM) {
+ struct Cmds {
+ cmds::BufferSubData buf;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 15;
+ const GLsizeiptr kSize = 16;
+
+ uint32 offset = 0;
+ Cmds expected;
+ expected.buf.Init(
+ kTarget, kOffset, kSize,
+ command_buffer()->GetNextFreeTransferBufferId(), offset);
+ expected.set_token.Init(GetNextToken());
+
+ void* mem = gl_->MapBufferSubDataCHROMIUM(
+ kTarget, kOffset, kSize, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem != NULL);
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MapUnmapBufferSubDataCHROMIUMBadArgs) {
+ const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+ const GLintptr kOffset = 15;
+ const GLsizeiptr kSize = 16;
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ // Calls to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ void* mem;
+ mem = gl_->MapBufferSubDataCHROMIUM(kTarget, -1, kSize, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapBufferSubDataCHROMIUM(kTarget, kOffset, -1, GL_WRITE_ONLY);
+ ASSERT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapBufferSubDataCHROMIUM(kTarget, kOffset, kSize, GL_READ_ONLY);
+ ASSERT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+ const char* kPtr = "something";
+ gl_->UnmapBufferSubDataCHROMIUM(kPtr);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest, MapUnmapTexSubImage2DCHROMIUM) {
+ struct Cmds {
+ cmds::TexSubImage2D tex;
+ cmd::SetToken set_token;
+ };
+ const GLint kLevel = 1;
+ const GLint kXOffset = 2;
+ const GLint kYOffset = 3;
+ const GLint kWidth = 4;
+ const GLint kHeight = 5;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+
+ uint32 offset = 0;
+ Cmds expected;
+ expected.tex.Init(
+ GL_TEXTURE_2D, kLevel, kXOffset, kYOffset, kWidth, kHeight, kFormat,
+ kType,
+ command_buffer()->GetNextFreeTransferBufferId(), offset, GL_FALSE);
+ expected.set_token.Init(GetNextToken());
+
+ void* mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ ASSERT_TRUE(mem != NULL);
+ gl_->UnmapTexSubImage2DCHROMIUM(mem);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MapUnmapTexSubImage2DCHROMIUMBadArgs) {
+ const GLint kLevel = 1;
+ const GLint kXOffset = 2;
+ const GLint kYOffset = 3;
+ const GLint kWidth = 4;
+ const GLint kHeight = 5;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result5 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result6 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result7 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ // Calls to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result5.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result6.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result7.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ void* mem;
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ -1,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ -1,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ -1,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ -1,
+ kHeight,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ -1,
+ kFormat,
+ kType,
+ GL_WRITE_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ mem = gl_->MapTexSubImage2DCHROMIUM(
+ GL_TEXTURE_2D,
+ kLevel,
+ kXOffset,
+ kYOffset,
+ kWidth,
+ kHeight,
+ kFormat,
+ kType,
+ GL_READ_ONLY);
+ EXPECT_TRUE(mem == NULL);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+ const char* kPtr = "something";
+ gl_->UnmapTexSubImage2DCHROMIUM(kPtr);
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest, GetMultipleIntegervCHROMIUMValidArgs) {
+ const GLenum pnames[] = {
+ GL_DEPTH_WRITEMASK,
+ GL_COLOR_WRITEMASK,
+ GL_STENCIL_WRITEMASK,
+ };
+ const GLint num_results = 6;
+ GLint results[num_results + 1];
+ struct Cmds {
+ cmds::GetMultipleIntegervCHROMIUM get_multiple;
+ cmd::SetToken set_token;
+ };
+ const GLsizei kNumPnames = arraysize(pnames);
+ const GLsizeiptr kResultsSize = num_results * sizeof(results[0]);
+ const size_t kPNamesSize = kNumPnames * sizeof(pnames[0]);
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kPNamesSize + kResultsSize);
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(
+ sizeof(cmds::GetError::Result));
+
+ const uint32 kPnamesOffset = mem1.offset;
+ const uint32 kResultsOffset = mem1.offset + kPNamesSize;
+ Cmds expected;
+ expected.get_multiple.Init(
+ mem1.id, kPnamesOffset, kNumPnames,
+ mem1.id, kResultsOffset, kResultsSize);
+ expected.set_token.Init(GetNextToken());
+
+ const GLint kSentinel = 0x12345678;
+ memset(results, 0, sizeof(results));
+ results[num_results] = kSentinel;
+ const GLint returned_results[] = {
+ 1, 0, 1, 0, 1, -1,
+ };
+ // One call to flush to wait for results
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemoryFromArray(mem1.ptr + kPNamesSize,
+ returned_results, sizeof(returned_results)))
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(0, memcmp(&returned_results, results, sizeof(returned_results)));
+ EXPECT_EQ(kSentinel, results[num_results]);
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest, GetMultipleIntegervCHROMIUMBadArgs) {
+ GLenum pnames[] = {
+ GL_DEPTH_WRITEMASK,
+ GL_COLOR_WRITEMASK,
+ GL_STENCIL_WRITEMASK,
+ };
+ const GLint num_results = 6;
+ GLint results[num_results + 1];
+ const GLsizei kNumPnames = arraysize(pnames);
+ const GLsizeiptr kResultsSize = num_results * sizeof(results[0]);
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ // Calls to flush to wait for GetError
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ const GLint kSentinel = 0x12345678;
+ memset(results, 0, sizeof(results));
+ results[num_results] = kSentinel;
+ // try bad size.
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize + 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+ // try bad size.
+ ClearCommands();
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize - 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+ // try uncleared results.
+ ClearCommands();
+ results[2] = 1;
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+ // try bad enum results.
+ ClearCommands();
+ results[2] = 0;
+ pnames[1] = GL_TRUE;
+ gl_->GetMultipleIntegervCHROMIUM(
+ &pnames[0], kNumPnames, &results[0], kResultsSize);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+ EXPECT_EQ(0, results[0]);
+ EXPECT_EQ(kSentinel, results[num_results]);
+}
+
+TEST_F(GLES2ImplementationTest, GetProgramInfoCHROMIUMGoodArgs) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kProgramId = 123;
+ const char kBad = 0x12;
+ GLsizei size = 0;
+ const Str7 kString = {"foobar"};
+ char buf[20];
+
+ ExpectedMemoryInfo mem1 =
+ GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ memset(buf, kBad, sizeof(buf));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetProgramInfoCHROMIUM get_program_info;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_program_info.Init(kProgramId, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ gl_->GetProgramInfoCHROMIUM(kProgramId, sizeof(buf), &size, &buf);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+ EXPECT_EQ(sizeof(kString), static_cast<size_t>(size));
+ EXPECT_STREQ(kString.str, buf);
+ EXPECT_EQ(buf[sizeof(kString)], kBad);
+}
+
+TEST_F(GLES2ImplementationTest, GetProgramInfoCHROMIUMBadArgs) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const GLuint kProgramId = 123;
+ GLsizei size = 0;
+ const Str7 kString = {"foobar"};
+ char buf[20];
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+ ExpectedMemoryInfo result2 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result3 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+ ExpectedMemoryInfo result4 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .WillOnce(SetMemory(result2.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result3.ptr, GLuint(GL_NO_ERROR)))
+ .WillOnce(SetMemory(result4.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+
+ // try bufsize not big enough.
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetProgramInfoCHROMIUM get_program_info;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_program_info.Init(kProgramId, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ gl_->GetProgramInfoCHROMIUM(kProgramId, 6, &size, &buf);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), gl_->GetError());
+ ClearCommands();
+
+ // try bad bufsize
+ gl_->GetProgramInfoCHROMIUM(kProgramId, -1, &size, &buf);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+ ClearCommands();
+ // try no size ptr.
+ gl_->GetProgramInfoCHROMIUM(kProgramId, sizeof(buf), NULL, &buf);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
+// Test that cached GetIntegerv state is returned without issuing any commands.
+TEST_F(GLES2ImplementationTest, GetIntegerCacheRead) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ const PNameValue pairs[] = {
+ {GL_ACTIVE_TEXTURE, GL_TEXTURE0, },
+ {GL_TEXTURE_BINDING_2D, 0, },
+ {GL_TEXTURE_BINDING_CUBE_MAP, 0, },
+ {GL_TEXTURE_BINDING_EXTERNAL_OES, 0, },
+ {GL_FRAMEBUFFER_BINDING, 0, },
+ {GL_RENDERBUFFER_BINDING, 0, },
+ {GL_ARRAY_BUFFER_BINDING, 0, },
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, 0, },
+ {GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, kMaxCombinedTextureImageUnits, },
+ {GL_MAX_CUBE_MAP_TEXTURE_SIZE, kMaxCubeMapTextureSize, },
+ {GL_MAX_FRAGMENT_UNIFORM_VECTORS, kMaxFragmentUniformVectors, },
+ {GL_MAX_RENDERBUFFER_SIZE, kMaxRenderbufferSize, },
+ {GL_MAX_TEXTURE_IMAGE_UNITS, kMaxTextureImageUnits, },
+ {GL_MAX_TEXTURE_SIZE, kMaxTextureSize, },
+ {GL_MAX_VARYING_VECTORS, kMaxVaryingVectors, },
+ {GL_MAX_VERTEX_ATTRIBS, kMaxVertexAttribs, },
+ {GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, kMaxVertexTextureImageUnits, },
+ {GL_MAX_VERTEX_UNIFORM_VECTORS, kMaxVertexUniformVectors, },
+ {GL_NUM_COMPRESSED_TEXTURE_FORMATS, kNumCompressedTextureFormats, },
+ {GL_NUM_SHADER_BINARY_FORMATS, kNumShaderBinaryFormats, }, };
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(pv.expected, v);
+ }
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest, GetIntegerCacheWrite) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ gl_->ActiveTexture(GL_TEXTURE4);
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 2);
+ gl_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 3);
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 4);
+ gl_->BindRenderbuffer(GL_RENDERBUFFER, 5);
+ gl_->BindTexture(GL_TEXTURE_2D, 6);
+ gl_->BindTexture(GL_TEXTURE_CUBE_MAP, 7);
+ gl_->BindTexture(GL_TEXTURE_EXTERNAL_OES, 8);
+
+ const PNameValue pairs[] = {{GL_ACTIVE_TEXTURE, GL_TEXTURE4, },
+ {GL_ARRAY_BUFFER_BINDING, 2, },
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, 3, },
+ {GL_FRAMEBUFFER_BINDING, 4, },
+ {GL_RENDERBUFFER_BINDING, 5, },
+ {GL_TEXTURE_BINDING_2D, 6, },
+ {GL_TEXTURE_BINDING_CUBE_MAP, 7, },
+ {GL_TEXTURE_BINDING_EXTERNAL_OES, 8, }, };
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_EQ(pv.expected, v);
+ }
+
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetError::Result));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, GLuint(GL_NO_ERROR)))
+ .RetiresOnSaturation();
+ EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
+}
+
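+// Returns true if every row of |r1| matches the corresponding row of |r2|,
+// comparing only the unpadded bytes of each row and walking |r2| bottom-up
+// when |flip_y| is set.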
+static bool CheckRect(
+ int width, int height, GLenum format, GLenum type, int alignment,
+ bool flip_y, const uint8* r1, const uint8* r2) {
+ uint32 size = 0;
+ uint32 unpadded_row_size = 0;
+ uint32 padded_row_size = 0;
+ if (!GLES2Util::ComputeImageDataSizes(
+ width, height, format, type, alignment, &size, &unpadded_row_size,
+ &padded_row_size)) {
+ return false;
+ }
+
+ int r2_stride = flip_y ?
+ -static_cast<int>(padded_row_size) :
+ static_cast<int>(padded_row_size);
+ r2 = flip_y ? (r2 + (height - 1) * padded_row_size) : r2;
+
+ for (int y = 0; y < height; ++y) {
+ if (memcmp(r1, r2, unpadded_row_size) != 0) {
+ return false;
+ }
+ r1 += padded_row_size;
+ r2 += r2_stride;
+ }
+ return true;
+}
+
+ACTION_P8(CheckRectAction, width, height, format, type, alignment, flip_y,
+ r1, r2) {
+ EXPECT_TRUE(CheckRect(
+ width, height, format, type, alignment, flip_y, r1, r2));
+}
+
+// Test TexImage2D with and without flip_y
+TEST_F(GLES2ImplementationTest, TexImage2D) {
+ struct Cmds {
+ cmds::TexImage2D tex_image_2d;
+ cmd::SetToken set_token;
+ };
+ struct Cmds2 {
+ cmds::TexImage2D tex_image_2d;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGB;
+ const GLsizei kWidth = 3;
+ const GLsizei kHeight = 4;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLint kPixelStoreUnpackAlignment = 4;
+ static uint8 pixels[] = {
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 101, 102, 103,
+ 21, 22, 23, 23, 24, 25, 25, 26, 27, 201, 202, 203,
+ 31, 32, 33, 33, 34, 35, 35, 36, 37, 123, 124, 125,
+ 41, 42, 43, 43, 44, 45, 45, 46, 47,
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(sizeof(pixels));
+
+ Cmds expected;
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ mem1.id, mem1.offset);
+ expected.set_token.Init(GetNextToken());
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment, false,
+ pixels, mem1.ptr));
+
+ ClearCommands();
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(sizeof(pixels));
+ Cmds2 expected2;
+ expected2.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ mem2.id, mem2.offset);
+ expected2.set_token.Init(GetNextToken());
+ const void* commands2 = GetPut();
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels);
+ EXPECT_EQ(0, memcmp(&expected2, commands2, sizeof(expected2)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment, true,
+ pixels, mem2.ptr));
+}
+
+// Test TexImage2D with 2 writes
+TEST_F(GLES2ImplementationTest, TexImage2D2Writes) {
+ struct Cmds {
+ cmds::TexImage2D tex_image_2d;
+ cmds::TexSubImage2D tex_sub_image_2d1;
+ cmd::SetToken set_token1;
+ cmds::TexSubImage2D tex_sub_image_2d2;
+ cmd::SetToken set_token2;
+ };
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGB;
+ const GLint kBorder = 0;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLint kPixelStoreUnpackAlignment = 4;
+ const GLsizei kWidth = 3;
+
+ uint32 size = 0;
+ uint32 unpadded_row_size = 0;
+ uint32 padded_row_size = 0;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ &size, &unpadded_row_size, &padded_row_size));
+ const GLsizei kHeight = (MaxTransferBufferSize() / padded_row_size) * 2;
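+ // kHeight is chosen so the whole image is about twice the transfer buffer
+ // size, forcing the upload to be split into two halves.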
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment,
+ &size, NULL, NULL));
+ uint32 half_size = 0;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ &half_size, NULL, NULL));
+
+ scoped_ptr<uint8[]> pixels(new uint8[size]);
+ for (uint32 ii = 0; ii < size; ++ii) {
+ pixels[ii] = static_cast<uint8>(ii);
+ }
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(half_size);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(half_size);
+
+ Cmds expected;
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ 0, 0);
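+ // TexImage2D is issued without pixel data (shm id/offset of 0); the two
+ // TexSubImage2D commands below upload the top and bottom halves.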
+ expected.tex_sub_image_2d1.Init(
+ kTarget, kLevel, 0, 0, kWidth, kHeight / 2, kFormat, kType,
+ mem1.id, mem1.offset, true);
+ expected.set_token1.Init(GetNextToken());
+ expected.tex_sub_image_2d2.Init(
+ kTarget, kLevel, 0, kHeight / 2, kWidth, kHeight / 2, kFormat, kType,
+ mem2.id, mem2.offset, true);
+ expected.set_token2.Init(GetNextToken());
+
+ // TODO(gman): Make it possible to run this test
+ // EXPECT_CALL(*command_buffer(), OnFlush())
+ // .WillOnce(CheckRectAction(
+ // kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ // false, pixels.get(),
+ // GetExpectedTransferAddressFromOffsetAs<uint8>(offset1, half_size)))
+ // .RetiresOnSaturation();
+
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels.get());
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment, false,
+ pixels.get() + kHeight / 2 * padded_row_size, mem2.ptr));
+
+ ClearCommands();
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ const void* commands2 = GetPut();
+ ExpectedMemoryInfo mem3 = GetExpectedMemory(half_size);
+ ExpectedMemoryInfo mem4 = GetExpectedMemory(half_size);
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ 0, 0);
+ expected.tex_sub_image_2d1.Init(
+ kTarget, kLevel, 0, kHeight / 2, kWidth, kHeight / 2, kFormat, kType,
+ mem3.id, mem3.offset, true);
+ expected.set_token1.Init(GetNextToken());
+ expected.tex_sub_image_2d2.Init(
+ kTarget, kLevel, 0, 0, kWidth, kHeight / 2, kFormat, kType,
+ mem4.id, mem4.offset, true);
+ expected.set_token2.Init(GetNextToken());
+
+ // TODO(gman): Make it possible to run this test
+ // EXPECT_CALL(*command_buffer(), OnFlush())
+ // .WillOnce(CheckRectAction(
+ // kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ // true, pixels.get(),
+ // GetExpectedTransferAddressFromOffsetAs<uint8>(offset3, half_size)))
+ // .RetiresOnSaturation();
+
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels.get());
+ EXPECT_EQ(0, memcmp(&expected, commands2, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight / 2, kFormat, kType, kPixelStoreUnpackAlignment, true,
+ pixels.get() + kHeight / 2 * padded_row_size, mem4.ptr));
+}
+
+// Test TexSubImage2D with GL_UNPACK_FLIP_Y_CHROMIUM set and partial multirow
+// transfers.
+TEST_F(GLES2ImplementationTest, TexSubImage2DFlipY) {
+ const GLsizei kTextureWidth = MaxTransferBufferSize() / 4;
+ const GLsizei kTextureHeight = 7;
+ const GLsizei kSubImageWidth = MaxTransferBufferSize() / 8;
+ const GLsizei kSubImageHeight = 4;
+ const GLint kSubImageXOffset = 1;
+ const GLint kSubImageYOffset = 2;
+ const GLenum kFormat = GL_RGBA;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLint kBorder = 0;
+ const GLint kPixelStoreUnpackAlignment = 4;
+
+ struct Cmds {
+ cmds::PixelStorei pixel_store_i1;
+ cmds::TexImage2D tex_image_2d;
+ cmds::PixelStorei pixel_store_i2;
+ cmds::TexSubImage2D tex_sub_image_2d1;
+ cmd::SetToken set_token1;
+ cmds::TexSubImage2D tex_sub_image_2d2;
+ cmd::SetToken set_token2;
+ };
+
+ uint32 sub_2_high_size = 0;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kSubImageWidth, 2, kFormat, kType, kPixelStoreUnpackAlignment,
+ &sub_2_high_size, NULL, NULL));
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(sub_2_high_size);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(sub_2_high_size);
+
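+ // Only two rows fit per transfer, so the 4-row subimage is sent as two
+ // transfers; with flip_y the upper destination rows are written first.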
+ Cmds expected;
+ expected.pixel_store_i1.Init(GL_UNPACK_ALIGNMENT, kPixelStoreUnpackAlignment);
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kTextureWidth, kTextureHeight, kFormat,
+ kType, 0, 0);
+ expected.pixel_store_i2.Init(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ expected.tex_sub_image_2d1.Init(kTarget, kLevel, kSubImageXOffset,
+ kSubImageYOffset + 2, kSubImageWidth, 2, kFormat, kType,
+ mem1.id, mem1.offset, false);
+ expected.set_token1.Init(GetNextToken());
+ expected.tex_sub_image_2d2.Init(kTarget, kLevel, kSubImageXOffset,
+ kSubImageYOffset, kSubImageWidth, 2, kFormat, kType,
+ mem2.id, mem2.offset, false);
+ expected.set_token2.Init(GetNextToken());
+
+ gl_->PixelStorei(GL_UNPACK_ALIGNMENT, kPixelStoreUnpackAlignment);
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kTextureWidth, kTextureHeight, kBorder, kFormat,
+ kType, NULL);
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, GL_TRUE);
+ scoped_ptr<uint32[]> pixels(new uint32[kSubImageWidth * kSubImageHeight]);
+ for (int y = 0; y < kSubImageHeight; ++y) {
+ for (int x = 0; x < kSubImageWidth; ++x) {
+ pixels.get()[kSubImageWidth * y + x] = x | (y << 16);
+ }
+ }
+ gl_->TexSubImage2D(
+ GL_TEXTURE_2D, 0, kSubImageXOffset, kSubImageYOffset, kSubImageWidth,
+ kSubImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels.get());
+
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kSubImageWidth, 2, kFormat, kType, kPixelStoreUnpackAlignment, true,
+ reinterpret_cast<uint8*>(pixels.get() + 2 * kSubImageWidth),
+ mem2.ptr));
+}
+
+TEST_F(GLES2ImplementationTest, SubImageUnpack) {
+ static const GLint unpack_alignments[] = { 1, 2, 4, 8 };
+
+ static const GLenum kFormat = GL_RGB;
+ static const GLenum kType = GL_UNSIGNED_BYTE;
+ static const GLint kLevel = 0;
+ static const GLint kBorder = 0;
+ // We're testing that the unpack parameters can pull a subimage out of a
+ // larger source of pixels. The subimage is specified by its bounding rows
+ // and columns within the source.
+ static const GLint kSrcWidth = 33;
+ static const GLint kSrcSubImageX0 = 11;
+ static const GLint kSrcSubImageX1 = 20;
+ static const GLint kSrcSubImageY0 = 18;
+ static const GLint kSrcSubImageY1 = 23;
+ static const GLint kSrcSubImageWidth = kSrcSubImageX1 - kSrcSubImageX0;
+ static const GLint kSrcSubImageHeight = kSrcSubImageY1 - kSrcSubImageY0;
+
+ // These constants are only used in the TexSubImage2D cases.
+ static const GLint kTexWidth = 1023;
+ static const GLint kTexHeight = 511;
+ static const GLint kTexSubXOffset = 419;
+ static const GLint kTexSubYOffset = 103;
+
+ struct {
+ cmds::PixelStorei pixel_store_i;
+ cmds::PixelStorei pixel_store_i2;
+ cmds::TexImage2D tex_image_2d;
+ } texImageExpected;
+
+ struct {
+ cmds::PixelStorei pixel_store_i;
+ cmds::PixelStorei pixel_store_i2;
+ cmds::TexImage2D tex_image_2d;
+ cmds::TexSubImage2D tex_sub_image_2d;
+ } texSubImageExpected;
+
+ uint32 src_size;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kSrcWidth, kSrcSubImageY1, kFormat, kType, 8, &src_size, NULL, NULL));
+ scoped_ptr<uint8[]> src_pixels;
+ src_pixels.reset(new uint8[src_size]);
+ for (size_t i = 0; i < src_size; ++i) {
+ src_pixels[i] = static_cast<int8>(i);
+ }
+
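+ // Cover TexImage2D and TexSubImage2D, with and without flip_y, for each
+ // supported unpack alignment.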
+ for (int sub = 0; sub < 2; ++sub) {
+ for (int flip_y = 0; flip_y < 2; ++flip_y) {
+ for (size_t a = 0; a < arraysize(unpack_alignments); ++a) {
+ GLint alignment = unpack_alignments[a];
+ uint32 size;
+ uint32 unpadded_row_size;
+ uint32 padded_row_size;
+ ASSERT_TRUE(GLES2Util::ComputeImageDataSizes(
+ kSrcSubImageWidth, kSrcSubImageHeight, kFormat, kType, alignment,
+ &size, &unpadded_row_size, &padded_row_size));
+ ASSERT_TRUE(size <= MaxTransferBufferSize());
+ ExpectedMemoryInfo mem = GetExpectedMemory(size);
+
+ const void* commands = GetPut();
+ gl_->PixelStorei(GL_UNPACK_ALIGNMENT, alignment);
+ gl_->PixelStorei(GL_UNPACK_ROW_LENGTH_EXT, kSrcWidth);
+ gl_->PixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, kSrcSubImageX0);
+ gl_->PixelStorei(GL_UNPACK_SKIP_ROWS_EXT, kSrcSubImageY0);
+ gl_->PixelStorei(GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ if (sub) {
+ gl_->TexImage2D(
+ GL_TEXTURE_2D, kLevel, kFormat, kTexWidth, kTexHeight, kBorder,
+ kFormat, kType, NULL);
+ gl_->TexSubImage2D(
+ GL_TEXTURE_2D, kLevel, kTexSubXOffset, kTexSubYOffset,
+ kSrcSubImageWidth, kSrcSubImageHeight, kFormat, kType,
+ src_pixels.get());
+ texSubImageExpected.pixel_store_i.Init(
+ GL_UNPACK_ALIGNMENT, alignment);
+ texSubImageExpected.pixel_store_i2.Init(
+ GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ texSubImageExpected.tex_image_2d.Init(
+ GL_TEXTURE_2D, kLevel, kFormat, kTexWidth, kTexHeight,
+ kFormat, kType, 0, 0);
+ texSubImageExpected.tex_sub_image_2d.Init(
+ GL_TEXTURE_2D, kLevel, kTexSubXOffset, kTexSubYOffset,
+ kSrcSubImageWidth, kSrcSubImageHeight, kFormat, kType, mem.id,
+ mem.offset, GL_FALSE);
+ EXPECT_EQ(0, memcmp(
+ &texSubImageExpected, commands, sizeof(texSubImageExpected)));
+ } else {
+ gl_->TexImage2D(
+ GL_TEXTURE_2D, kLevel, kFormat,
+ kSrcSubImageWidth, kSrcSubImageHeight, kBorder, kFormat, kType,
+ src_pixels.get());
+ texImageExpected.pixel_store_i.Init(GL_UNPACK_ALIGNMENT, alignment);
+ texImageExpected.pixel_store_i2.Init(
+ GL_UNPACK_FLIP_Y_CHROMIUM, flip_y);
+ texImageExpected.tex_image_2d.Init(
+ GL_TEXTURE_2D, kLevel, kFormat, kSrcSubImageWidth,
+ kSrcSubImageHeight, kFormat, kType, mem.id, mem.offset);
+ EXPECT_EQ(0, memcmp(
+ &texImageExpected, commands, sizeof(texImageExpected)));
+ }
+ uint32 src_padded_row_size;
+ ASSERT_TRUE(GLES2Util::ComputeImagePaddedRowSize(
+ kSrcWidth, kFormat, kType, alignment, &src_padded_row_size));
+ uint32 bytes_per_group = GLES2Util::ComputeImageGroupSize(
+ kFormat, kType);
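+ // Verify each row written to the transfer buffer matches the corresponding
+ // source row (rows are taken in reverse order when flip_y is set).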
+ for (int y = 0; y < kSrcSubImageHeight; ++y) {
+ GLint src_sub_y = flip_y ? kSrcSubImageHeight - y - 1 : y;
+ const uint8* src_row = src_pixels.get() +
+ (kSrcSubImageY0 + src_sub_y) * src_padded_row_size +
+ bytes_per_group * kSrcSubImageX0;
+ const uint8* dst_row = mem.ptr + y * padded_row_size;
+ EXPECT_EQ(0, memcmp(src_row, dst_row, unpadded_row_size));
+ }
+ ClearCommands();
+ }
+ }
+ }
+}
+
+// Test texture related calls with invalid arguments.
+TEST_F(GLES2ImplementationTest, TextureInvalidArguments) {
+ struct Cmds {
+ cmds::TexImage2D tex_image_2d;
+ cmd::SetToken set_token;
+ };
+ const GLenum kTarget = GL_TEXTURE_2D;
+ const GLint kLevel = 0;
+ const GLenum kFormat = GL_RGB;
+ const GLsizei kWidth = 3;
+ const GLsizei kHeight = 4;
+ const GLint kBorder = 0;
+ const GLint kInvalidBorder = 1;
+ const GLenum kType = GL_UNSIGNED_BYTE;
+ const GLint kPixelStoreUnpackAlignment = 4;
+ static uint8 pixels[] = {
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 101, 102, 103,
+ 21, 22, 23, 23, 24, 25, 25, 26, 27, 201, 202, 203,
+ 31, 32, 33, 33, 34, 35, 35, 36, 37, 123, 124, 125,
+ 41, 42, 43, 43, 44, 45, 45, 46, 47,
+ };
+
+ // First verify that a valid call works.
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(sizeof(pixels));
+
+ Cmds expected;
+ expected.tex_image_2d.Init(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kFormat, kType,
+ mem1.id, mem1.offset);
+ expected.set_token.Init(GetNextToken());
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kBorder, kFormat, kType,
+ pixels);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(CheckRect(
+ kWidth, kHeight, kFormat, kType, kPixelStoreUnpackAlignment, false,
+ pixels, mem1.ptr));
+
+ ClearCommands();
+
+ // Use invalid border.
+ gl_->TexImage2D(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kInvalidBorder, kFormat, kType,
+ pixels);
+
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+
+ ClearCommands();
+
+ gl_->AsyncTexImage2DCHROMIUM(
+ kTarget, kLevel, kFormat, kWidth, kHeight, kInvalidBorder, kFormat, kType,
+ NULL);
+
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+
+ ClearCommands();
+
+ // Checking CompressedTexImage2D argument validation is a bit tricky because
+ // compression formats are detected at runtime. Use an auxiliary check to
+ // infer which argument triggered the error.
+ const GLenum kCompressedFormat = GL_ETC1_RGB8_OES;
+ gl_->CompressedTexImage2D(
+ kTarget, kLevel, kCompressedFormat, kWidth, kHeight, kBorder,
+ arraysize(pixels), pixels);
+
+ // In the call above, kCompressedFormat and arraysize(pixels) may be invalid
+ // values. First ensure that they do not cause failures at the client. If
+ // this check ever fails, it probably means the client validates more than it
+ // did when this test was written, and this test needs to be extended.
+ EXPECT_FALSE(NoCommandsWritten());
+
+ ClearCommands();
+
+ // Changing the border to an invalid value should make the call fail the
+ // client-side checks.
+ gl_->CompressedTexImage2D(
+ kTarget, kLevel, kCompressedFormat, kWidth, kHeight, kInvalidBorder,
+ arraysize(pixels), pixels);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+
+// Binds cannot be cached with bind_generates_resource = false because our id
+// might not be valid. More specifically, if you bind on contextA and then
+// delete on contextB, the resource is still bound on contextA but GetIntegerv
+// won't return an id.
+TEST_F(GLES2ImplementationStrictSharedTest, BindsNotCached) {
+ struct PNameValue {
+ GLenum pname;
+ GLint expected;
+ };
+ const PNameValue pairs[] = {{GL_TEXTURE_BINDING_2D, 1, },
+ {GL_TEXTURE_BINDING_CUBE_MAP, 2, },
+ {GL_TEXTURE_BINDING_EXTERNAL_OES, 3, },
+ {GL_FRAMEBUFFER_BINDING, 4, },
+ {GL_RENDERBUFFER_BINDING, 5, },
+ {GL_ARRAY_BUFFER_BINDING, 6, },
+ {GL_ELEMENT_ARRAY_BUFFER_BINDING, 7, }, };
+ size_t num_pairs = sizeof(pairs) / sizeof(pairs[0]);
+ for (size_t ii = 0; ii < num_pairs; ++ii) {
+ const PNameValue& pv = pairs[ii];
+ GLint v = -1;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::GetIntegerv::Result));
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr,
+ SizedResultHelper<GLuint>(pv.expected)))
+ .RetiresOnSaturation();
+ gl_->GetIntegerv(pv.pname, &v);
+ EXPECT_EQ(pv.expected, v);
+ }
+}
+
+// glGen* Ids must not be reused until glDelete* commands have been
+// flushed by glFlush.
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestBuffers) {
+ FlushGenerationTest<GenBuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestFramebuffers) {
+ FlushGenerationTest<GenFramebuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestRenderbuffers) {
+ FlushGenerationTest<GenRenderbuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest, FlushGenerationTestTextures) {
+ FlushGenerationTest<GenTexturesAPI>();
+}
+
+// glGen* Ids must not be reused cross-context until glDelete* commands are
+// flushed by glFlush, and the Ids are lazily freed after.
+TEST_F(GLES2ImplementationStrictSharedTest, CrossContextGenerationTestBuffers) {
+ CrossContextGenerationTest<GenBuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationTestFramebuffers) {
+ CrossContextGenerationTest<GenFramebuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationTestRenderbuffers) {
+ CrossContextGenerationTest<GenRenderbuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationTestTextures) {
+ CrossContextGenerationTest<GenTexturesAPI>();
+}
+
+// Test a Delete that causes an auto-flush. Covers a regression case that
+// occurred in testing.
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestBuffers) {
+ CrossContextGenerationAutoFlushTest<GenBuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestFramebuffers) {
+ CrossContextGenerationAutoFlushTest<GenFramebuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestRenderbuffers) {
+ CrossContextGenerationAutoFlushTest<GenRenderbuffersAPI>();
+}
+TEST_F(GLES2ImplementationStrictSharedTest,
+ CrossContextGenerationAutoFlushTestTextures) {
+ CrossContextGenerationAutoFlushTest<GenTexturesAPI>();
+}
+
+TEST_F(GLES2ImplementationTest, GetString) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const Str7 kString = {"foobar"};
+ // GL_CHROMIUM_map_sub and GL_CHROMIUM_flipy are hard-coded into
+ // GLES2Implementation.
+ const char* expected_str =
+ "foobar "
+ "GL_CHROMIUM_flipy "
+ "GL_EXT_unpack_subimage "
+ "GL_CHROMIUM_map_sub";
+ const char kBad = 0x12;
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetString get_string;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ };
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
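+ // GetString goes through a bucket: clear the bucket, issue GetString, then
+ // read the bucket contents back through the transfer buffer.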
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_string.Init(GL_EXTENSIONS, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ char buf[sizeof(kString) + 1];
+ memset(buf, kBad, sizeof(buf));
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .RetiresOnSaturation();
+
+ const GLubyte* result = gl_->GetString(GL_EXTENSIONS);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_STREQ(expected_str, reinterpret_cast<const char*>(result));
+}
+
+TEST_F(GLES2ImplementationTest, PixelStoreiGLPackReverseRowOrderANGLE) {
+ const uint32 kBucketId = GLES2Implementation::kResultBucketId;
+ const Str7 kString = {"foobar"};
+ struct Cmds {
+ cmd::SetBucketSize set_bucket_size1;
+ cmds::GetString get_string;
+ cmd::GetBucketStart get_bucket_start;
+ cmd::SetToken set_token1;
+ cmd::SetBucketSize set_bucket_size2;
+ cmds::PixelStorei pixel_store;
+ };
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(MaxTransferBufferSize());
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmd::GetBucketStart::Result));
+
+ Cmds expected;
+ expected.set_bucket_size1.Init(kBucketId, 0);
+ expected.get_string.Init(GL_EXTENSIONS, kBucketId);
+ expected.get_bucket_start.Init(
+ kBucketId, result1.id, result1.offset,
+ MaxTransferBufferSize(), mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.set_bucket_size2.Init(kBucketId, 0);
+ expected.pixel_store.Init(GL_PACK_REVERSE_ROW_ORDER_ANGLE, 1);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(DoAll(SetMemory(result1.ptr, uint32(sizeof(kString))),
+ SetMemory(mem1.ptr, kString)))
+ .RetiresOnSaturation();
+
+ gl_->PixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CreateProgram) {
+ struct Cmds {
+ cmds::CreateProgram cmd;
+ };
+
+ Cmds expected;
+ expected.cmd.Init(kProgramsAndShadersStartId);
+ GLuint id = gl_->CreateProgram();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kProgramsAndShadersStartId, id);
+}
+
+TEST_F(GLES2ImplementationTest, BufferDataLargerThanTransferBuffer) {
+ struct Cmds {
+ cmds::BufferData set_size;
+ cmds::BufferSubData copy_data1;
+ cmd::SetToken set_token1;
+ cmds::BufferSubData copy_data2;
+ cmd::SetToken set_token2;
+ };
+ const unsigned kUsableSize =
+ kTransferBufferSize - GLES2Implementation::kStartingOffset;
+ uint8 buf[kUsableSize * 2] = { 0, };
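+ // buf is twice the usable transfer buffer size, so BufferData is sent
+ // without data and the contents follow as two BufferSubData chunks.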
+
+ ExpectedMemoryInfo mem1 = GetExpectedMemory(kUsableSize);
+ ExpectedMemoryInfo mem2 = GetExpectedMemory(kUsableSize);
+
+ Cmds expected;
+ expected.set_size.Init(
+ GL_ARRAY_BUFFER, arraysize(buf), 0, 0, GL_DYNAMIC_DRAW);
+ expected.copy_data1.Init(
+ GL_ARRAY_BUFFER, 0, kUsableSize, mem1.id, mem1.offset);
+ expected.set_token1.Init(GetNextToken());
+ expected.copy_data2.Init(
+ GL_ARRAY_BUFFER, kUsableSize, kUsableSize, mem2.id, mem2.offset);
+ expected.set_token2.Init(GetNextToken());
+ gl_->BufferData(GL_ARRAY_BUFFER, arraysize(buf), buf, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CapabilitiesAreCached) {
+ static const GLenum kStates[] = {
+ GL_DITHER,
+ GL_BLEND,
+ GL_CULL_FACE,
+ GL_DEPTH_TEST,
+ GL_POLYGON_OFFSET_FILL,
+ GL_SAMPLE_ALPHA_TO_COVERAGE,
+ GL_SAMPLE_COVERAGE,
+ GL_SCISSOR_TEST,
+ GL_STENCIL_TEST,
+ };
+ struct Cmds {
+ cmds::Enable enable_cmd;
+ };
+ Cmds expected;
+
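+ // Only GL_DITHER (index 0) is enabled by default. After an explicit Enable
+ // the state is cached, so the second IsEnabled writes no commands.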
+ for (size_t ii = 0; ii < arraysize(kStates); ++ii) {
+ GLenum state = kStates[ii];
+ expected.enable_cmd.Init(state);
+ GLboolean result = gl_->IsEnabled(state);
+ EXPECT_EQ(static_cast<GLboolean>(ii == 0), result);
+ EXPECT_TRUE(NoCommandsWritten());
+ const void* commands = GetPut();
+ if (!result) {
+ gl_->Enable(state);
+ EXPECT_EQ(0, memcmp(&expected, commands, sizeof(expected)));
+ }
+ ClearCommands();
+ result = gl_->IsEnabled(state);
+ EXPECT_TRUE(result);
+ EXPECT_TRUE(NoCommandsWritten());
+ }
+}
+
+TEST_F(GLES2ImplementationTest, BindVertexArrayOES) {
+ GLuint id = 0;
+ gl_->GenVertexArraysOES(1, &id);
+ ClearCommands();
+
+ struct Cmds {
+ cmds::BindVertexArrayOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(id);
+
+ const void* commands = GetPut();
+ gl_->BindVertexArrayOES(id);
+ EXPECT_EQ(0, memcmp(&expected, commands, sizeof(expected)));
+ ClearCommands();
+ gl_->BindVertexArrayOES(id);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BeginEndQueryEXT) {
+ // Test GetQueryivEXT returns 0 if no current query.
+ GLint param = -1;
+ gl_->GetQueryivEXT(GL_ANY_SAMPLES_PASSED_EXT, GL_CURRENT_QUERY_EXT, &param);
+ EXPECT_EQ(0, param);
+
+ GLuint expected_ids[2] = { 1, 2 }; // These must match the ids actually generated.
+ struct GenCmds {
+ cmds::GenQueriesEXTImmediate gen;
+ GLuint data[2];
+ };
+ GenCmds expected_gen_cmds;
+ expected_gen_cmds.gen.Init(arraysize(expected_ids), &expected_ids[0]);
+ GLuint ids[arraysize(expected_ids)] = { 0, };
+ gl_->GenQueriesEXT(arraysize(expected_ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(
+ &expected_gen_cmds, commands_, sizeof(expected_gen_cmds)));
+ GLuint id1 = ids[0];
+ GLuint id2 = ids[1];
+ ClearCommands();
+
+ // Test BeginQueryEXT fails if id = 0.
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, 0);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test BeginQueryEXT inserts command.
+ struct BeginCmds {
+ cmds::BeginQueryEXT begin_query;
+ };
+ BeginCmds expected_begin_cmds;
+ const void* commands = GetPut();
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id1);
+ QueryTracker::Query* query = GetQuery(id1);
+ ASSERT_TRUE(query != NULL);
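+ // The query tracker allocates shared memory for the result; BeginQueryEXT
+ // carries that memory's shm id and offset.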
+ expected_begin_cmds.begin_query.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, id1, query->shm_id(), query->shm_offset());
+ EXPECT_EQ(0, memcmp(
+ &expected_begin_cmds, commands, sizeof(expected_begin_cmds)));
+ ClearCommands();
+
+ // Test GetQueryivEXT returns id.
+ param = -1;
+ gl_->GetQueryivEXT(GL_ANY_SAMPLES_PASSED_EXT, GL_CURRENT_QUERY_EXT, &param);
+ EXPECT_EQ(id1, static_cast<GLuint>(param));
+ gl_->GetQueryivEXT(
+ GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT, GL_CURRENT_QUERY_EXT, &param);
+ EXPECT_EQ(0, param);
+
+ // Test BeginQueryEXT fails if between Begin/End.
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id2);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test EndQueryEXT fails if target not same as current query.
+ ClearCommands();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test EndQueryEXT sends a command.
+ struct EndCmds {
+ cmds::EndQueryEXT end_query;
+ };
+ EndCmds expected_end_cmds;
+ expected_end_cmds.end_query.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, query->submit_count());
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+ EXPECT_EQ(0, memcmp(
+ &expected_end_cmds, commands, sizeof(expected_end_cmds)));
+
+ // Test EndQueryEXT fails if no current query.
+ ClearCommands();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test 2nd Begin/End increments count.
+ base::subtle::Atomic32 old_submit_count = query->submit_count();
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_EXT, id1);
+ EXPECT_NE(old_submit_count, query->submit_count());
+ expected_end_cmds.end_query.Init(
+ GL_ANY_SAMPLES_PASSED_EXT, query->submit_count());
+ commands = GetPut();
+ gl_->EndQueryEXT(GL_ANY_SAMPLES_PASSED_EXT);
+ EXPECT_EQ(0, memcmp(
+ &expected_end_cmds, commands, sizeof(expected_end_cmds)));
+
+ // Test BeginQueryEXT fails if target changed.
+ ClearCommands();
+ gl_->BeginQueryEXT(GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT, id1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT fails if unused id
+ GLuint available = 0xBDu;
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(id2, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0xBDu, available);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT fails if bad id
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(4567, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0xBDu, available);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ // Test GetQueryObjectuivEXT CheckResultsAvailable
+ ClearCommands();
+ gl_->GetQueryObjectuivEXT(id1, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_EQ(0u, available);
+}
+
+TEST_F(GLES2ImplementationTest, ErrorQuery) {
+ GLuint id = 0;
+ gl_->GenQueriesEXT(1, &id);
+ ClearCommands();
+
+ // Test BeginQueryEXT does NOT insert commands.
+ gl_->BeginQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM, id);
+ EXPECT_TRUE(NoCommandsWritten());
+ QueryTracker::Query* query = GetQuery(id);
+ ASSERT_TRUE(query != NULL);
+
+ // Test EndQueryEXT sends both the begin and end commands.
+ struct EndCmds {
+ cmds::BeginQueryEXT begin_query;
+ cmds::EndQueryEXT end_query;
+ };
+ EndCmds expected_end_cmds;
+ expected_end_cmds.begin_query.Init(
+ GL_GET_ERROR_QUERY_CHROMIUM, id, query->shm_id(), query->shm_offset());
+ expected_end_cmds.end_query.Init(
+ GL_GET_ERROR_QUERY_CHROMIUM, query->submit_count());
+ const void* commands = GetPut();
+ gl_->EndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
+ EXPECT_EQ(0, memcmp(
+ &expected_end_cmds, commands, sizeof(expected_end_cmds)));
+ ClearCommands();
+
+ // Check result is not yet available.
+ GLuint available = 0xBDu;
+ gl_->GetQueryObjectuivEXT(id, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(0u, available);
+
+ // Test no commands are sent if there is a client side error.
+
+ // Generate a client side error
+ gl_->ActiveTexture(GL_TEXTURE0 - 1);
+
+ gl_->BeginQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM, id);
+ gl_->EndQueryEXT(GL_GET_ERROR_QUERY_CHROMIUM);
+ EXPECT_TRUE(NoCommandsWritten());
+
+ // Check result is available.
+ gl_->GetQueryObjectuivEXT(id, GL_QUERY_RESULT_AVAILABLE_EXT, &available);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_NE(0u, available);
+
+ // Check result.
+ GLuint result = 0xBDu;
+ gl_->GetQueryObjectuivEXT(id, GL_QUERY_RESULT_EXT, &result);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(static_cast<GLuint>(GL_INVALID_ENUM), result);
+}
+
+#if !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+TEST_F(GLES2ImplementationTest, VertexArrays) {
+ const GLuint kAttribIndex1 = 1;
+ const GLint kNumComponents1 = 3;
+ const GLsizei kClientStride = 12;
+
+ GLuint id = 0;
+ gl_->GenVertexArraysOES(1, &id);
+ ClearCommands();
+
+ gl_->BindVertexArrayOES(id);
+
+ // Test that VertexAttribPointer cannot be called with a bound buffer of 0
+ // unless the offset is NULL
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 0);
+
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride,
+ reinterpret_cast<const void*>(4));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+
+ gl_->VertexAttribPointer(
+ kAttribIndex1, kNumComponents1, GL_FLOAT, GL_FALSE, kClientStride, NULL);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+}
+#endif
+
+TEST_F(GLES2ImplementationTest, Disable) {
+ struct Cmds {
+ cmds::Disable cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_DITHER); // Note: DITHER defaults to enabled.
+
+ gl_->Disable(GL_DITHER);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ // Check it's cached and not called again.
+ ClearCommands();
+ gl_->Disable(GL_DITHER);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, Enable) {
+ struct Cmds {
+ cmds::Enable cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_BLEND); // Note: BLEND defaults to disabled.
+
+ gl_->Enable(GL_BLEND);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ // Check it's cached and not called again.
+ ClearCommands();
+ gl_->Enable(GL_BLEND);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, ConsumeTextureCHROMIUM) {
+ struct Cmds {
+ cmds::ConsumeTextureCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ gl_->ConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CreateAndConsumeTextureCHROMIUM) {
+ struct Cmds {
+ cmds::CreateAndConsumeTextureCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, kTexturesStartId, mailbox.name);
+ GLuint id = gl_->CreateAndConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kTexturesStartId, id);
+}
+
+TEST_F(GLES2ImplementationTest, ProduceTextureCHROMIUM) {
+ struct Cmds {
+ cmds::ProduceTextureCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, mailbox.name);
+ gl_->ProduceTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ProduceTextureDirectCHROMIUM) {
+ struct Cmds {
+ cmds::ProduceTextureDirectCHROMIUMImmediate cmd;
+ GLbyte data[64];
+ };
+
+ Mailbox mailbox = Mailbox::Generate();
+ Cmds expected;
+ expected.cmd.Init(kTexturesStartId, GL_TEXTURE_2D, mailbox.name);
+ gl_->ProduceTextureDirectCHROMIUM(
+ kTexturesStartId, GL_TEXTURE_2D, mailbox.name);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, LimitSizeAndOffsetTo32Bit) {
+ GLsizeiptr size;
+ GLintptr offset;
+ if (sizeof(size) <= 4 || sizeof(offset) <= 4)
+ return;
+ // The two casts below should be no-ops, since we return early on 32-bit
+ // systems.
+ int64 value64 = 0x100000000;
+ size = static_cast<GLsizeiptr>(value64);
+ offset = static_cast<GLintptr>(value64);
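+ // 0x100000000 does not fit in 32 bits, so each call below that uses it
+ // should fail with GL_INVALID_OPERATION and the matching overflow message.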
+
+ const char kSizeOverflowMessage[] = "size more than 32-bit";
+ const char kOffsetOverflowMessage[] = "offset more than 32-bit";
+
+ const GLfloat buf[] = { 1.0, 1.0, 1.0, 1.0 };
+ const GLubyte indices[] = { 0 };
+
+ const GLuint kClientArrayBufferId = 0x789;
+ const GLuint kClientElementArrayBufferId = 0x790;
+ gl_->BindBuffer(GL_ARRAY_BUFFER, kClientArrayBufferId);
+ gl_->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, kClientElementArrayBufferId);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // Calling BufferData() should succeed with legal parameters.
+ gl_->BufferData(GL_ARRAY_BUFFER, sizeof(buf), buf, GL_DYNAMIC_DRAW);
+ gl_->BufferData(
+ GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // BufferData: size
+ gl_->BufferData(GL_ARRAY_BUFFER, size, buf, GL_DYNAMIC_DRAW);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kSizeOverflowMessage, GetLastError().c_str());
+
+ // Calling BufferSubData() should succeed with legal parameters.
+ gl_->BufferSubData(GL_ARRAY_BUFFER, 0, sizeof(buf[0]), buf);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // BufferSubData: offset
+ gl_->BufferSubData(GL_ARRAY_BUFFER, offset, 1, buf);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // BufferSubData: size
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ gl_->BufferSubData(GL_ARRAY_BUFFER, 0, size, buf);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kSizeOverflowMessage, GetLastError().c_str());
+
+ // Calling MapBufferSubDataCHROMIUM() should succeed with legal parameters.
+ void* mem =
+ gl_->MapBufferSubDataCHROMIUM(GL_ARRAY_BUFFER, 0, 1, GL_WRITE_ONLY);
+ EXPECT_TRUE(NULL != mem);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+
+ // MapBufferSubDataCHROMIUM: offset
+ EXPECT_TRUE(NULL == gl_->MapBufferSubDataCHROMIUM(
+ GL_ARRAY_BUFFER, offset, 1, GL_WRITE_ONLY));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // MapBufferSubDataCHROMIUM: size
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+ EXPECT_TRUE(NULL == gl_->MapBufferSubDataCHROMIUM(
+ GL_ARRAY_BUFFER, 0, size, GL_WRITE_ONLY));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kSizeOverflowMessage, GetLastError().c_str());
+
+ // Calling DrawElements() should succeed with legal parameters.
+ gl_->DrawElements(GL_POINTS, 1, GL_UNSIGNED_BYTE, NULL);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // DrawElements: offset
+ gl_->DrawElements(
+ GL_POINTS, 1, GL_UNSIGNED_BYTE, reinterpret_cast<void*>(offset));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // Calling DrawElementsInstancedANGLE() should succeed with legal parameters.
+ gl_->DrawElementsInstancedANGLE(GL_POINTS, 1, GL_UNSIGNED_BYTE, NULL, 1);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // DrawElementsInstancedANGLE: offset
+ gl_->DrawElementsInstancedANGLE(
+ GL_POINTS, 1, GL_UNSIGNED_BYTE, reinterpret_cast<void*>(offset), 1);
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+
+ // Calling VertexAttribPointer() should succeed with legal parameters.
+ const GLuint kAttribIndex = 1;
+ const GLsizei kStride = 4;
+ gl_->VertexAttribPointer(
+ kAttribIndex, 1, GL_FLOAT, GL_FALSE, kStride, NULL);
+ EXPECT_EQ(GL_NO_ERROR, CheckError());
+
+ // VertexAttribPointer: offset
+ gl_->VertexAttribPointer(
+ kAttribIndex, 1, GL_FLOAT, GL_FALSE, kStride,
+ reinterpret_cast<void*>(offset));
+ EXPECT_EQ(GL_INVALID_OPERATION, CheckError());
+ EXPECT_STREQ(kOffsetOverflowMessage, GetLastError().c_str());
+}
+
+TEST_F(GLES2ImplementationManualInitTest, LoseContextOnOOM) {
+ ContextInitOptions init_options;
+ init_options.lose_context_when_out_of_memory = true;
+ ASSERT_TRUE(Initialize(init_options));
+
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+
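+ // Simulate an out-of-memory allocation by returning NULL from
+ // CreateGpuMemoryBuffer.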
+ GLsizei max = std::numeric_limits<GLsizei>::max();
+ EXPECT_CALL(*gpu_control_, CreateGpuMemoryBuffer(max, max, _, _, _))
+ .WillOnce(Return(static_cast<gfx::GpuMemoryBuffer*>(NULL)));
+ gl_->CreateImageCHROMIUM(max, max, 0, GL_IMAGE_MAP_CHROMIUM);
+ // The context should be lost.
+ Cmds expected;
+ expected.cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_UNKNOWN_CONTEXT_RESET_ARB);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationManualInitTest, NoLoseContextOnOOM) {
+ ContextInitOptions init_options;
+ ASSERT_TRUE(Initialize(init_options));
+
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+
+ GLsizei max = std::numeric_limits<GLsizei>::max();
+ EXPECT_CALL(*gpu_control_, CreateGpuMemoryBuffer(max, max, _, _, _))
+ .WillOnce(Return(static_cast<gfx::GpuMemoryBuffer*>(NULL)));
+ gl_->CreateImageCHROMIUM(max, max, 0, GL_IMAGE_MAP_CHROMIUM);
+ // The context should not be lost.
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationManualInitTest, FailInitOnBGRMismatch1) {
+ ContextInitOptions init_options;
+ init_options.bind_generates_resource_client = false;
+ init_options.bind_generates_resource_service = true;
+ EXPECT_FALSE(Initialize(init_options));
+}
+
+TEST_F(GLES2ImplementationManualInitTest, FailInitOnBGRMismatch2) {
+ ContextInitOptions init_options;
+ init_options.bind_generates_resource_client = true;
+ init_options.bind_generates_resource_service = false;
+ EXPECT_FALSE(Initialize(init_options));
+}
+
+#include "gpu/command_buffer/client/gles2_implementation_unittest_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
new file mode 100644
index 0000000..a42d6d5
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -0,0 +1,1926 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_implementation_unittest.cc to define the
+// unit tests for the GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+
+TEST_F(GLES2ImplementationTest, AttachShader) {
+ struct Cmds {
+ cmds::AttachShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->AttachShader(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for BindAttribLocation
+
+TEST_F(GLES2ImplementationTest, BindBuffer) {
+ struct Cmds {
+ cmds::BindBuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_ARRAY_BUFFER, 2);
+
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->BindBuffer(GL_ARRAY_BUFFER, 2);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BindFramebuffer) {
+ struct Cmds {
+ cmds::BindFramebuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, 2);
+
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->BindFramebuffer(GL_FRAMEBUFFER, 2);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BindRenderbuffer) {
+ struct Cmds {
+ cmds::BindRenderbuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2);
+
+ gl_->BindRenderbuffer(GL_RENDERBUFFER, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->BindRenderbuffer(GL_RENDERBUFFER, 2);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, BlendColor) {
+ struct Cmds {
+ cmds::BlendColor cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->BlendColor(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendEquation) {
+ struct Cmds {
+ cmds::BlendEquation cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FUNC_SUBTRACT);
+
+ gl_->BlendEquation(GL_FUNC_SUBTRACT);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendEquationSeparate) {
+ struct Cmds {
+ cmds::BlendEquationSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FUNC_SUBTRACT, GL_FUNC_ADD);
+
+ gl_->BlendEquationSeparate(GL_FUNC_SUBTRACT, GL_FUNC_ADD);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendFunc) {
+ struct Cmds {
+ cmds::BlendFunc cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_ZERO, GL_ZERO);
+
+ gl_->BlendFunc(GL_ZERO, GL_ZERO);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendFuncSeparate) {
+ struct Cmds {
+ cmds::BlendFuncSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO);
+
+ gl_->BlendFuncSeparate(GL_ZERO, GL_ZERO, GL_ZERO, GL_ZERO);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CheckFramebufferStatus) {
+ struct Cmds {
+ cmds::CheckFramebufferStatus cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::CheckFramebufferStatus::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->CheckFramebufferStatus(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, Clear) {
+ struct Cmds {
+ cmds::Clear cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->Clear(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ClearColor) {
+ struct Cmds {
+ cmds::ClearColor cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->ClearColor(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ClearDepthf) {
+ struct Cmds {
+ cmds::ClearDepthf cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(0.5f);
+
+ gl_->ClearDepthf(0.5f);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ClearStencil) {
+ struct Cmds {
+ cmds::ClearStencil cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->ClearStencil(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ColorMask) {
+ struct Cmds {
+ cmds::ColorMask cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(true, true, true, true);
+
+ gl_->ColorMask(true, true, true, true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CompileShader) {
+ struct Cmds {
+ cmds::CompileShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->CompileShader(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for CompressedTexImage2D
+// TODO: Implement unit test for CompressedTexSubImage2D
+
+TEST_F(GLES2ImplementationTest, CopyTexImage2D) {
+ struct Cmds {
+ cmds::CopyTexImage2D cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, GL_ALPHA, 4, 5, 6, 7);
+
+ gl_->CopyTexImage2D(GL_TEXTURE_2D, 2, GL_ALPHA, 4, 5, 6, 7, 0);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CopyTexImage2DInvalidConstantArg7) {
+ gl_->CopyTexImage2D(GL_TEXTURE_2D, 2, GL_ALPHA, 4, 5, 6, 7, 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, CopyTexSubImage2D) {
+ struct Cmds {
+ cmds::CopyTexSubImage2D cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8);
+
+ gl_->CopyTexSubImage2D(GL_TEXTURE_2D, 2, 3, 4, 5, 6, 7, 8);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CullFace) {
+ struct Cmds {
+ cmds::CullFace cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT);
+
+ gl_->CullFace(GL_FRONT);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteBuffers) {
+ GLuint ids[2] = {kBuffersStartId, kBuffersStartId + 1};
+ struct Cmds {
+ cmds::DeleteBuffersImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kBuffersStartId;
+ expected.data[1] = kBuffersStartId + 1;
+ gl_->DeleteBuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteFramebuffers) {
+ GLuint ids[2] = {kFramebuffersStartId, kFramebuffersStartId + 1};
+ struct Cmds {
+ cmds::DeleteFramebuffersImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kFramebuffersStartId;
+ expected.data[1] = kFramebuffersStartId + 1;
+ gl_->DeleteFramebuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteProgram) {
+ struct Cmds {
+ cmds::DeleteProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DeleteProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteRenderbuffers) {
+ GLuint ids[2] = {kRenderbuffersStartId, kRenderbuffersStartId + 1};
+ struct Cmds {
+ cmds::DeleteRenderbuffersImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kRenderbuffersStartId;
+ expected.data[1] = kRenderbuffersStartId + 1;
+ gl_->DeleteRenderbuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteShader) {
+ struct Cmds {
+ cmds::DeleteShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DeleteShader(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DeleteTextures) {
+ GLuint ids[2] = {kTexturesStartId, kTexturesStartId + 1};
+ struct Cmds {
+ cmds::DeleteTexturesImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kTexturesStartId;
+ expected.data[1] = kTexturesStartId + 1;
+ gl_->DeleteTextures(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DepthFunc) {
+ struct Cmds {
+ cmds::DepthFunc cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_NEVER);
+
+ gl_->DepthFunc(GL_NEVER);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DepthMask) {
+ struct Cmds {
+ cmds::DepthMask cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(true);
+
+ gl_->DepthMask(true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DepthRangef) {
+ struct Cmds {
+ cmds::DepthRangef cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->DepthRangef(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DetachShader) {
+ struct Cmds {
+ cmds::DetachShader cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->DetachShader(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DisableVertexAttribArray) {
+ struct Cmds {
+ cmds::DisableVertexAttribArray cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->DisableVertexAttribArray(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawArrays) {
+ struct Cmds {
+ cmds::DrawArrays cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_POINTS, 2, 3);
+
+ gl_->DrawArrays(GL_POINTS, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, EnableVertexAttribArray) {
+ struct Cmds {
+ cmds::EnableVertexAttribArray cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->EnableVertexAttribArray(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Flush) {
+ struct Cmds {
+ cmds::Flush cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->Flush();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferRenderbuffer) {
+ struct Cmds {
+ cmds::FramebufferRenderbuffer cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, 4);
+
+ gl_->FramebufferRenderbuffer(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferTexture2D) {
+ struct Cmds {
+ cmds::FramebufferTexture2D cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4);
+
+ gl_->FramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 0);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferTexture2DInvalidConstantArg4) {
+ gl_->FramebufferTexture2D(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 1);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, FrontFace) {
+ struct Cmds {
+ cmds::FrontFace cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_CW);
+
+ gl_->FrontFace(GL_CW);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenBuffers) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenBuffersImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kBuffersStartId;
+ expected.data[1] = kBuffersStartId + 1;
+ gl_->GenBuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kBuffersStartId, ids[0]);
+ EXPECT_EQ(kBuffersStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, GenerateMipmap) {
+ struct Cmds {
+ cmds::GenerateMipmap cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D);
+
+ gl_->GenerateMipmap(GL_TEXTURE_2D);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenFramebuffers) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenFramebuffersImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kFramebuffersStartId;
+ expected.data[1] = kFramebuffersStartId + 1;
+ gl_->GenFramebuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kFramebuffersStartId, ids[0]);
+ EXPECT_EQ(kFramebuffersStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, GenRenderbuffers) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenRenderbuffersImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kRenderbuffersStartId;
+ expected.data[1] = kRenderbuffersStartId + 1;
+ gl_->GenRenderbuffers(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kRenderbuffersStartId, ids[0]);
+ EXPECT_EQ(kRenderbuffersStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, GenTextures) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenTexturesImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kTexturesStartId;
+ expected.data[1] = kTexturesStartId + 1;
+ gl_->GenTextures(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kTexturesStartId, ids[0]);
+ EXPECT_EQ(kTexturesStartId + 1, ids[1]);
+}
+// TODO: Implement unit test for GetActiveAttrib
+// TODO: Implement unit test for GetActiveUniform
+// TODO: Implement unit test for GetAttachedShaders
+// TODO: Implement unit test for GetAttribLocation
+
+TEST_F(GLES2ImplementationTest, GetBooleanv) {
+ struct Cmds {
+ cmds::GetBooleanv cmd;
+ };
+ typedef cmds::GetBooleanv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetBooleanv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetBufferParameteriv) {
+ struct Cmds {
+ cmds::GetBufferParameteriv cmd;
+ };
+ typedef cmds::GetBufferParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_BUFFER_SIZE, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetBufferParameteriv(123, GL_BUFFER_SIZE, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetFloatv) {
+ struct Cmds {
+ cmds::GetFloatv cmd;
+ };
+ typedef cmds::GetFloatv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetFloatv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetFramebufferAttachmentParameteriv) {
+ struct Cmds {
+ cmds::GetFramebufferAttachmentParameteriv cmd;
+ };
+ typedef cmds::GetFramebufferAttachmentParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ result1.id,
+ result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetFramebufferAttachmentParameteriv(
+ 123,
+ GL_COLOR_ATTACHMENT0,
+ GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
+ &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetIntegerv) {
+ struct Cmds {
+ cmds::GetIntegerv cmd;
+ };
+ typedef cmds::GetIntegerv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetIntegerv(123, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetProgramiv) {
+ struct Cmds {
+ cmds::GetProgramiv cmd;
+ };
+ typedef cmds::GetProgramiv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_DELETE_STATUS, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetProgramiv(123, GL_DELETE_STATUS, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+// TODO: Implement unit test for GetProgramInfoLog
+
+TEST_F(GLES2ImplementationTest, GetRenderbufferParameteriv) {
+ struct Cmds {
+ cmds::GetRenderbufferParameteriv cmd;
+ };
+ typedef cmds::GetRenderbufferParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_RENDERBUFFER_RED_SIZE, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetRenderbufferParameteriv(123, GL_RENDERBUFFER_RED_SIZE, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetShaderiv) {
+ struct Cmds {
+ cmds::GetShaderiv cmd;
+ };
+ typedef cmds::GetShaderiv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_SHADER_TYPE, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetShaderiv(123, GL_SHADER_TYPE, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+// TODO: Implement unit test for GetShaderInfoLog
+// TODO: Implement unit test for GetShaderPrecisionFormat
+
+TEST_F(GLES2ImplementationTest, GetTexParameterfv) {
+ struct Cmds {
+ cmds::GetTexParameterfv cmd;
+ };
+ typedef cmds::GetTexParameterfv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_TEXTURE_MAG_FILTER, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetTexParameterfv(123, GL_TEXTURE_MAG_FILTER, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetTexParameteriv) {
+ struct Cmds {
+ cmds::GetTexParameteriv cmd;
+ };
+ typedef cmds::GetTexParameteriv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(123, GL_TEXTURE_MAG_FILTER, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetTexParameteriv(123, GL_TEXTURE_MAG_FILTER, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+// TODO: Implement unit test for GetUniformfv
+// TODO: Implement unit test for GetUniformiv
+// TODO: Implement unit test for GetUniformLocation
+
+TEST_F(GLES2ImplementationTest, GetVertexAttribfv) {
+ struct Cmds {
+ cmds::GetVertexAttribfv cmd;
+ };
+ typedef cmds::GetVertexAttribfv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(
+ 123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetVertexAttribfv(123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, GetVertexAttribiv) {
+ struct Cmds {
+ cmds::GetVertexAttribiv cmd;
+ };
+ typedef cmds::GetVertexAttribiv::Result Result;
+ Result::Type result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 = GetExpectedResultMemory(4);
+ expected.cmd.Init(
+ 123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<Result::Type>(1)))
+ .RetiresOnSaturation();
+ gl_->GetVertexAttribiv(123, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<Result::Type>(1), result);
+}
+
+TEST_F(GLES2ImplementationTest, Hint) {
+ struct Cmds {
+ cmds::Hint cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
+
+ gl_->Hint(GL_GENERATE_MIPMAP_HINT, GL_FASTEST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, IsBuffer) {
+ struct Cmds {
+ cmds::IsBuffer cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsBuffer::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsBuffer(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsEnabled) {
+ struct Cmds {
+ cmds::IsEnabled cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsEnabled::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsEnabled(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsFramebuffer) {
+ struct Cmds {
+ cmds::IsFramebuffer cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsFramebuffer::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsFramebuffer(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsProgram) {
+ struct Cmds {
+ cmds::IsProgram cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsProgram::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsRenderbuffer) {
+ struct Cmds {
+ cmds::IsRenderbuffer cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsRenderbuffer::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsRenderbuffer(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsShader) {
+ struct Cmds {
+ cmds::IsShader cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsShader::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsShader(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, IsTexture) {
+ struct Cmds {
+ cmds::IsTexture cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsTexture::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsTexture(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+
+TEST_F(GLES2ImplementationTest, LineWidth) {
+ struct Cmds {
+ cmds::LineWidth cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(0.5f);
+
+ gl_->LineWidth(0.5f);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, LinkProgram) {
+ struct Cmds {
+ cmds::LinkProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->LinkProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, PixelStorei) {
+ struct Cmds {
+ cmds::PixelStorei cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_PACK_ALIGNMENT, 1);
+
+ gl_->PixelStorei(GL_PACK_ALIGNMENT, 1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, PolygonOffset) {
+ struct Cmds {
+ cmds::PolygonOffset cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->PolygonOffset(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReleaseShaderCompiler) {
+ struct Cmds {
+ cmds::ReleaseShaderCompiler cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->ReleaseShaderCompiler();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, RenderbufferStorage) {
+ struct Cmds {
+ cmds::RenderbufferStorage cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+
+ gl_->RenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, SampleCoverage) {
+ struct Cmds {
+ cmds::SampleCoverage cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, true);
+
+ gl_->SampleCoverage(1, true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Scissor) {
+ struct Cmds {
+ cmds::Scissor cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Scissor(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilFunc) {
+ struct Cmds {
+ cmds::StencilFunc cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_NEVER, 2, 3);
+
+ gl_->StencilFunc(GL_NEVER, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilFuncSeparate) {
+ struct Cmds {
+ cmds::StencilFuncSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT, GL_NEVER, 3, 4);
+
+ gl_->StencilFuncSeparate(GL_FRONT, GL_NEVER, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilMask) {
+ struct Cmds {
+ cmds::StencilMask cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->StencilMask(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilMaskSeparate) {
+ struct Cmds {
+ cmds::StencilMaskSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT, 2);
+
+ gl_->StencilMaskSeparate(GL_FRONT, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilOp) {
+ struct Cmds {
+ cmds::StencilOp cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_KEEP, GL_INCR, GL_KEEP);
+
+ gl_->StencilOp(GL_KEEP, GL_INCR, GL_KEEP);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, StencilOpSeparate) {
+ struct Cmds {
+ cmds::StencilOpSeparate cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP);
+
+ gl_->StencilOpSeparate(GL_FRONT, GL_INCR, GL_KEEP, GL_KEEP);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameterf) {
+ struct Cmds {
+ cmds::TexParameterf cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ gl_->TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameterfv) {
+ GLfloat data[1] = {0};
+ struct Cmds {
+ cmds::TexParameterfvImmediate cmd;
+ GLfloat data[1];
+ };
+
+ for (int jj = 0; jj < 1; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ gl_->TexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameteri) {
+ struct Cmds {
+ cmds::TexParameteri cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, TexParameteriv) {
+ GLint data[1] = {0};
+ struct Cmds {
+ cmds::TexParameterivImmediate cmd;
+ GLint data[1];
+ };
+
+ for (int jj = 0; jj < 1; ++jj) {
+ data[jj] = static_cast<GLint>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ gl_->TexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1f) {
+ struct Cmds {
+ cmds::Uniform1f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->Uniform1f(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1fv) {
+ GLfloat data[2][1] = {{0}};
+ struct Cmds {
+ cmds::Uniform1fvImmediate cmd;
+ GLfloat data[2][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform1fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1i) {
+ struct Cmds {
+ cmds::Uniform1i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->Uniform1i(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform1iv) {
+ GLint data[2][1] = {{0}};
+ struct Cmds {
+ cmds::Uniform1ivImmediate cmd;
+ GLint data[2][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform1iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2f) {
+ struct Cmds {
+ cmds::Uniform2f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->Uniform2f(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2fv) {
+ GLfloat data[2][2] = {{0}};
+ struct Cmds {
+ cmds::Uniform2fvImmediate cmd;
+ GLfloat data[2][2];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 2; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 2 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform2fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2i) {
+ struct Cmds {
+ cmds::Uniform2i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->Uniform2i(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform2iv) {
+ GLint data[2][2] = {{0}};
+ struct Cmds {
+ cmds::Uniform2ivImmediate cmd;
+ GLint data[2][2];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 2; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 2 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform2iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3f) {
+ struct Cmds {
+ cmds::Uniform3f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Uniform3f(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3fv) {
+ GLfloat data[2][3] = {{0}};
+ struct Cmds {
+ cmds::Uniform3fvImmediate cmd;
+ GLfloat data[2][3];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 3; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 3 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform3fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3i) {
+ struct Cmds {
+ cmds::Uniform3i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Uniform3i(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform3iv) {
+ GLint data[2][3] = {{0}};
+ struct Cmds {
+ cmds::Uniform3ivImmediate cmd;
+ GLint data[2][3];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 3; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 3 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform3iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4f) {
+ struct Cmds {
+ cmds::Uniform4f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->Uniform4f(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4fv) {
+ GLfloat data[2][4] = {{0}};
+ struct Cmds {
+ cmds::Uniform4fvImmediate cmd;
+ GLfloat data[2][4];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 4 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform4fv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4i) {
+ struct Cmds {
+ cmds::Uniform4i cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->Uniform4i(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Uniform4iv) {
+ GLint data[2][4] = {{0}};
+ struct Cmds {
+ cmds::Uniform4ivImmediate cmd;
+ GLint data[2][4];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLint>(ii * 4 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->Uniform4iv(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix2fv) {
+ GLfloat data[2][4] = {{0}};
+ struct Cmds {
+ cmds::UniformMatrix2fvImmediate cmd;
+ GLfloat data[2][4];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 4 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->UniformMatrix2fv(1, 2, false, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix2fvInvalidConstantArg2) {
+ GLfloat data[2][4] = {{0}};
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 4; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 4 + jj);
+ }
+ }
+ gl_->UniformMatrix2fv(1, 2, true, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix3fv) {
+ GLfloat data[2][9] = {{0}};
+ struct Cmds {
+ cmds::UniformMatrix3fvImmediate cmd;
+ GLfloat data[2][9];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 9; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 9 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->UniformMatrix3fv(1, 2, false, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix3fvInvalidConstantArg2) {
+ GLfloat data[2][9] = {{0}};
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 9; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 9 + jj);
+ }
+ }
+ gl_->UniformMatrix3fv(1, 2, true, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix4fv) {
+ GLfloat data[2][16] = {{0}};
+ struct Cmds {
+ cmds::UniformMatrix4fvImmediate cmd;
+ GLfloat data[2][16];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 16; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 16 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->UniformMatrix4fv(1, 2, false, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, UniformMatrix4fvInvalidConstantArg2) {
+ GLfloat data[2][16] = {{0}};
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 16; ++jj) {
+ data[ii][jj] = static_cast<GLfloat>(ii * 16 + jj);
+ }
+ }
+ gl_->UniformMatrix4fv(1, 2, true, &data[0][0]);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, UseProgram) {
+ struct Cmds {
+ cmds::UseProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->UseProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ ClearCommands();
+ gl_->UseProgram(1);
+ EXPECT_TRUE(NoCommandsWritten());
+}
+
+TEST_F(GLES2ImplementationTest, ValidateProgram) {
+ struct Cmds {
+ cmds::ValidateProgram cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->ValidateProgram(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib1f) {
+ struct Cmds {
+ cmds::VertexAttrib1f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->VertexAttrib1f(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib1fv) {
+ GLfloat data[1] = {0};
+ struct Cmds {
+ cmds::VertexAttrib1fvImmediate cmd;
+ GLfloat data[1];
+ };
+
+ for (int jj = 0; jj < 1; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib1fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib2f) {
+ struct Cmds {
+ cmds::VertexAttrib2f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->VertexAttrib2f(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib2fv) {
+ GLfloat data[2] = {0};
+ struct Cmds {
+ cmds::VertexAttrib2fvImmediate cmd;
+ GLfloat data[2];
+ };
+
+ for (int jj = 0; jj < 2; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib2fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib3f) {
+ struct Cmds {
+ cmds::VertexAttrib3f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->VertexAttrib3f(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib3fv) {
+ GLfloat data[3] = {0};
+ struct Cmds {
+ cmds::VertexAttrib3fvImmediate cmd;
+ GLfloat data[3];
+ };
+
+ for (int jj = 0; jj < 3; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib3fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib4f) {
+ struct Cmds {
+ cmds::VertexAttrib4f cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->VertexAttrib4f(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttrib4fv) {
+ GLfloat data[4] = {0};
+ struct Cmds {
+ cmds::VertexAttrib4fvImmediate cmd;
+ GLfloat data[4];
+ };
+
+ for (int jj = 0; jj < 4; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, &data[0]);
+ gl_->VertexAttrib4fv(1, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, Viewport) {
+ struct Cmds {
+ cmds::Viewport cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4);
+
+ gl_->Viewport(1, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlitFramebufferCHROMIUM) {
+ struct Cmds {
+ cmds::BlitFramebufferCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5, 6, 7, 8, 9, GL_NEAREST);
+
+ gl_->BlitFramebufferCHROMIUM(1, 2, 3, 4, 5, 6, 7, 8, 9, GL_NEAREST);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleCHROMIUM) {
+ struct Cmds {
+ cmds::RenderbufferStorageMultisampleCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+
+ gl_->RenderbufferStorageMultisampleCHROMIUM(
+ GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, RenderbufferStorageMultisampleEXT) {
+ struct Cmds {
+ cmds::RenderbufferStorageMultisampleEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+
+ gl_->RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, 2, GL_RGBA4, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, FramebufferTexture2DMultisampleEXT) {
+ struct Cmds {
+ cmds::FramebufferTexture2DMultisampleEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 6);
+
+ gl_->FramebufferTexture2DMultisampleEXT(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 0, 6);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest,
+ FramebufferTexture2DMultisampleEXTInvalidConstantArg4) {
+ gl_->FramebufferTexture2DMultisampleEXT(
+ GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 4, 1, 6);
+ EXPECT_TRUE(NoCommandsWritten());
+ EXPECT_EQ(GL_INVALID_VALUE, CheckError());
+}
+
+TEST_F(GLES2ImplementationTest, TexStorage2DEXT) {
+ struct Cmds {
+ cmds::TexStorage2DEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, GL_RGB565, 4, 5);
+
+ gl_->TexStorage2DEXT(GL_TEXTURE_2D, 2, GL_RGB565, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenQueriesEXT) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenQueriesEXTImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kQueriesStartId;
+ expected.data[1] = kQueriesStartId + 1;
+ gl_->GenQueriesEXT(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kQueriesStartId, ids[0]);
+ EXPECT_EQ(kQueriesStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, DeleteQueriesEXT) {
+ GLuint ids[2] = {kQueriesStartId, kQueriesStartId + 1};
+ struct Cmds {
+ cmds::DeleteQueriesEXTImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kQueriesStartId;
+ expected.data[1] = kQueriesStartId + 1;
+ gl_->DeleteQueriesEXT(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for BeginQueryEXT
+// TODO: Implement unit test for InsertEventMarkerEXT
+// TODO: Implement unit test for PushGroupMarkerEXT
+
+TEST_F(GLES2ImplementationTest, PopGroupMarkerEXT) {
+ struct Cmds {
+ cmds::PopGroupMarkerEXT cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->PopGroupMarkerEXT();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, GenVertexArraysOES) {
+ GLuint ids[2] = {
+ 0,
+ };
+ struct Cmds {
+ cmds::GenVertexArraysOESImmediate gen;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.gen.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kVertexArraysStartId;
+ expected.data[1] = kVertexArraysStartId + 1;
+ gl_->GenVertexArraysOES(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(kVertexArraysStartId, ids[0]);
+ EXPECT_EQ(kVertexArraysStartId + 1, ids[1]);
+}
+
+TEST_F(GLES2ImplementationTest, DeleteVertexArraysOES) {
+ GLuint ids[2] = {kVertexArraysStartId, kVertexArraysStartId + 1};
+ struct Cmds {
+ cmds::DeleteVertexArraysOESImmediate del;
+ GLuint data[2];
+ };
+ Cmds expected;
+ expected.del.Init(arraysize(ids), &ids[0]);
+ expected.data[0] = kVertexArraysStartId;
+ expected.data[1] = kVertexArraysStartId + 1;
+ gl_->DeleteVertexArraysOES(arraysize(ids), &ids[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, IsVertexArrayOES) {
+ struct Cmds {
+ cmds::IsVertexArrayOES cmd;
+ };
+
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(cmds::IsVertexArrayOES::Result));
+ expected.cmd.Init(1, result1.id, result1.offset);
+
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, uint32_t(1)))
+ .RetiresOnSaturation();
+
+ GLboolean result = gl_->IsVertexArrayOES(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_TRUE(result);
+}
+// TODO: Implement unit test for EnableFeatureCHROMIUM
+
+TEST_F(GLES2ImplementationTest, ResizeCHROMIUM) {
+ struct Cmds {
+ cmds::ResizeCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->ResizeCHROMIUM(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for GetRequestableExtensionsCHROMIUM
+// TODO: Implement unit test for CreateStreamTextureCHROMIUM
+// TODO: Implement unit test for GetTranslatedShaderSourceANGLE
+
+TEST_F(GLES2ImplementationTest, TexImageIOSurface2DCHROMIUM) {
+ struct Cmds {
+ cmds::TexImageIOSurface2DCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2, 3, 4, 5);
+
+ gl_->TexImageIOSurface2DCHROMIUM(GL_TEXTURE_2D, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, CopyTextureCHROMIUM) {
+ struct Cmds {
+ cmds::CopyTextureCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, GL_ALPHA, GL_UNSIGNED_BYTE);
+
+ gl_->CopyTextureCHROMIUM(1, 2, 3, 4, GL_ALPHA, GL_UNSIGNED_BYTE);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawArraysInstancedANGLE) {
+ struct Cmds {
+ cmds::DrawArraysInstancedANGLE cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_POINTS, 2, 3, 4);
+
+ gl_->DrawArraysInstancedANGLE(GL_POINTS, 2, 3, 4);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, VertexAttribDivisorANGLE) {
+ struct Cmds {
+ cmds::VertexAttribDivisorANGLE cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->VertexAttribDivisorANGLE(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for GenMailboxCHROMIUM
+// TODO: Implement unit test for BindUniformLocationCHROMIUM
+
+TEST_F(GLES2ImplementationTest, BindTexImage2DCHROMIUM) {
+ struct Cmds {
+ cmds::BindTexImage2DCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2);
+
+ gl_->BindTexImage2DCHROMIUM(GL_TEXTURE_2D, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ReleaseTexImage2DCHROMIUM) {
+ struct Cmds {
+ cmds::ReleaseTexImage2DCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_TEXTURE_2D, 2);
+
+ gl_->ReleaseTexImage2DCHROMIUM(GL_TEXTURE_2D, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DiscardFramebufferEXT) {
+ GLenum data[2][1] = {{0}};
+ struct Cmds {
+ cmds::DiscardFramebufferEXTImmediate cmd;
+ GLenum data[2][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 2; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLenum>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, 2, &data[0][0]);
+ gl_->DiscardFramebufferEXT(1, 2, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, LoseContextCHROMIUM) {
+ struct Cmds {
+ cmds::LoseContextCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_GUILTY_CONTEXT_RESET_ARB, GL_GUILTY_CONTEXT_RESET_ARB);
+
+ gl_->LoseContextCHROMIUM(GL_GUILTY_CONTEXT_RESET_ARB,
+ GL_GUILTY_CONTEXT_RESET_ARB);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+// TODO: Implement unit test for InsertSyncPointCHROMIUM
+
+TEST_F(GLES2ImplementationTest, WaitSyncPointCHROMIUM) {
+ struct Cmds {
+ cmds::WaitSyncPointCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1);
+
+ gl_->WaitSyncPointCHROMIUM(1);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DrawBuffersEXT) {
+ GLenum data[1][1] = {{0}};
+ struct Cmds {
+ cmds::DrawBuffersEXTImmediate cmd;
+ GLenum data[1][1];
+ };
+
+ Cmds expected;
+ for (int ii = 0; ii < 1; ++ii) {
+ for (int jj = 0; jj < 1; ++jj) {
+ data[ii][jj] = static_cast<GLenum>(ii * 1 + jj);
+ }
+ }
+ expected.cmd.Init(1, &data[0][0]);
+ gl_->DrawBuffersEXT(1, &data[0][0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DiscardBackbufferCHROMIUM) {
+ struct Cmds {
+ cmds::DiscardBackbufferCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init();
+
+ gl_->DiscardBackbufferCHROMIUM();
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MatrixLoadfCHROMIUM) {
+ GLfloat data[16] = {0};
+ struct Cmds {
+ cmds::MatrixLoadfCHROMIUMImmediate cmd;
+ GLfloat data[16];
+ };
+
+ for (int jj = 0; jj < 16; ++jj) {
+ data[jj] = static_cast<GLfloat>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(GL_PATH_PROJECTION_CHROMIUM, &data[0]);
+ gl_->MatrixLoadfCHROMIUM(GL_PATH_PROJECTION_CHROMIUM, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MatrixLoadIdentityCHROMIUM) {
+ struct Cmds {
+ cmds::MatrixLoadIdentityCHROMIUM cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(GL_PATH_PROJECTION_CHROMIUM);
+
+ gl_->MatrixLoadIdentityCHROMIUM(GL_PATH_PROJECTION_CHROMIUM);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_interface.h b/gpu/command_buffer/client/gles2_interface.h
new file mode 100644
index 0000000..ca05308
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_H_
+
+#include <GLES2/gl2.h>
+
+#include "base/compiler_specific.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class is the interface for all client side GL functions.
+class GLES2Interface {
+ public:
+ GLES2Interface() {}
+ virtual ~GLES2Interface() {}
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_interface_autogen.h"
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_H_
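
The pure-virtual GLES2Interface declared above is the seam that lets client code
stay agnostic about which object handles its GL calls: the real
command-buffer-backed GLES2Implementation, or a test double such as the
GLES2InterfaceStub added later in this patch. A minimal sketch of coding against
the interface follows; the CreateStaticBuffer helper is illustrative only and is
not part of this patch.

// Sketch: depend on the abstract interface rather than a concrete
// implementation, so tests can substitute a stub or mock.
#include <GLES2/gl2.h>

#include "gpu/command_buffer/client/gles2_interface.h"

namespace {

// Creates a buffer object and uploads |size| bytes of |data| into it,
// using only calls declared on GLES2Interface.
GLuint CreateStaticBuffer(gpu::gles2::GLES2Interface* gl,
                          const void* data,
                          GLsizeiptr size) {
  GLuint buffer = 0;
  gl->GenBuffers(1, &buffer);
  gl->BindBuffer(GL_ARRAY_BUFFER, buffer);
  gl->BufferData(GL_ARRAY_BUFFER, size, data, GL_STATIC_DRAW);
  return buffer;
}

}  // namespace
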
diff --git a/gpu/command_buffer/client/gles2_interface_autogen.h b/gpu/command_buffer/client/gles2_interface_autogen.h
new file mode 100644
index 0000000..abfc598
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -0,0 +1,503 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface.h to declare the
+// GL api functions.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) = 0;
+virtual void AttachShader(GLuint program, GLuint shader) = 0;
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) = 0;
+virtual void BindBuffer(GLenum target, GLuint buffer) = 0;
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) = 0;
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) = 0;
+virtual void BindTexture(GLenum target, GLuint texture) = 0;
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) = 0;
+virtual void BlendEquation(GLenum mode) = 0;
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) = 0;
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) = 0;
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) = 0;
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) = 0;
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) = 0;
+virtual GLenum CheckFramebufferStatus(GLenum target) = 0;
+virtual void Clear(GLbitfield mask) = 0;
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) = 0;
+virtual void ClearDepthf(GLclampf depth) = 0;
+virtual void ClearStencil(GLint s) = 0;
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) = 0;
+virtual void CompileShader(GLuint shader) = 0;
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) = 0;
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) = 0;
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) = 0;
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual GLuint CreateProgram() = 0;
+virtual GLuint CreateShader(GLenum type) = 0;
+virtual void CullFace(GLenum mode) = 0;
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) = 0;
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) = 0;
+virtual void DeleteProgram(GLuint program) = 0;
+virtual void DeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers) = 0;
+virtual void DeleteShader(GLuint shader) = 0;
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) = 0;
+virtual void DepthFunc(GLenum func) = 0;
+virtual void DepthMask(GLboolean flag) = 0;
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) = 0;
+virtual void DetachShader(GLuint program, GLuint shader) = 0;
+virtual void Disable(GLenum cap) = 0;
+virtual void DisableVertexAttribArray(GLuint index) = 0;
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) = 0;
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) = 0;
+virtual void Enable(GLenum cap) = 0;
+virtual void EnableVertexAttribArray(GLuint index) = 0;
+virtual void Finish() = 0;
+virtual void Flush() = 0;
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) = 0;
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) = 0;
+virtual void FrontFace(GLenum mode) = 0;
+virtual void GenBuffers(GLsizei n, GLuint* buffers) = 0;
+virtual void GenerateMipmap(GLenum target) = 0;
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) = 0;
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) = 0;
+virtual void GenTextures(GLsizei n, GLuint* textures) = 0;
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) = 0;
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) = 0;
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) = 0;
+virtual GLint GetAttribLocation(GLuint program, const char* name) = 0;
+virtual void GetBooleanv(GLenum pname, GLboolean* params) = 0;
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) = 0;
+virtual GLenum GetError() = 0;
+virtual void GetFloatv(GLenum pname, GLfloat* params) = 0;
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) = 0;
+virtual void GetIntegerv(GLenum pname, GLint* params) = 0;
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) = 0;
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) = 0;
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) = 0;
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) = 0;
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) = 0;
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) = 0;
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) = 0;
+virtual const GLubyte* GetString(GLenum name) = 0;
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) = 0;
+virtual void GetTexParameteriv(GLenum target, GLenum pname, GLint* params) = 0;
+virtual void GetUniformfv(GLuint program, GLint location, GLfloat* params) = 0;
+virtual void GetUniformiv(GLuint program, GLint location, GLint* params) = 0;
+virtual GLint GetUniformLocation(GLuint program, const char* name) = 0;
+virtual void GetVertexAttribfv(GLuint index, GLenum pname, GLfloat* params) = 0;
+virtual void GetVertexAttribiv(GLuint index, GLenum pname, GLint* params) = 0;
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) = 0;
+virtual void Hint(GLenum target, GLenum mode) = 0;
+virtual GLboolean IsBuffer(GLuint buffer) = 0;
+virtual GLboolean IsEnabled(GLenum cap) = 0;
+virtual GLboolean IsFramebuffer(GLuint framebuffer) = 0;
+virtual GLboolean IsProgram(GLuint program) = 0;
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) = 0;
+virtual GLboolean IsShader(GLuint shader) = 0;
+virtual GLboolean IsTexture(GLuint texture) = 0;
+virtual void LineWidth(GLfloat width) = 0;
+virtual void LinkProgram(GLuint program) = 0;
+virtual void PixelStorei(GLenum pname, GLint param) = 0;
+virtual void PolygonOffset(GLfloat factor, GLfloat units) = 0;
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) = 0;
+virtual void ReleaseShaderCompiler() = 0;
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void SampleCoverage(GLclampf value, GLboolean invert) = 0;
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) = 0;
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) = 0;
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) = 0;
+virtual void ShallowFinishCHROMIUM() = 0;
+virtual void ShallowFlushCHROMIUM() = 0;
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) = 0;
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) = 0;
+virtual void StencilMask(GLuint mask) = 0;
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) = 0;
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) = 0;
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) = 0;
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) = 0;
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) = 0;
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) = 0;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) = 0;
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) = 0;
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) = 0;
+virtual void Uniform1f(GLint location, GLfloat x) = 0;
+virtual void Uniform1fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform1i(GLint location, GLint x) = 0;
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) = 0;
+virtual void Uniform2fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform2i(GLint location, GLint x, GLint y) = 0;
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void Uniform3f(GLint location, GLfloat x, GLfloat y, GLfloat z) = 0;
+virtual void Uniform3fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) = 0;
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) = 0;
+virtual void Uniform4fv(GLint location, GLsizei count, const GLfloat* v) = 0;
+virtual void Uniform4i(GLint location, GLint x, GLint y, GLint z, GLint w) = 0;
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) = 0;
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) = 0;
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) = 0;
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) = 0;
+virtual void UseProgram(GLuint program) = 0;
+virtual void ValidateProgram(GLuint program) = 0;
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) = 0;
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) = 0;
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z) = 0;
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) = 0;
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) = 0;
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) = 0;
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) = 0;
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) = 0;
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) = 0;
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) = 0;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) = 0;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) = 0;
+virtual GLboolean IsQueryEXT(GLuint id) = 0;
+virtual void BeginQueryEXT(GLenum target, GLuint id) = 0;
+virtual void EndQueryEXT(GLenum target) = 0;
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) = 0;
+virtual void GetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint* params) = 0;
+virtual void InsertEventMarkerEXT(GLsizei length, const GLchar* marker) = 0;
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) = 0;
+virtual void PopGroupMarkerEXT() = 0;
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) = 0;
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) = 0;
+virtual GLboolean IsVertexArrayOES(GLuint array) = 0;
+virtual void BindVertexArrayOES(GLuint array) = 0;
+virtual void SwapBuffers() = 0;
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) = 0;
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) = 0;
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) = 0;
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) = 0;
+virtual void* MapImageCHROMIUM(GLuint image_id) = 0;
+virtual void UnmapImageCHROMIUM(GLuint image_id) = 0;
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) = 0;
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) = 0;
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) = 0;
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) = 0;
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) = 0;
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() = 0;
+virtual void RequestExtensionCHROMIUM(const char* extension) = 0;
+virtual void RateLimitOffscreenContextCHROMIUM() = 0;
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) = 0;
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) = 0;
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) = 0;
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) = 0;
+virtual void DestroyImageCHROMIUM(GLuint image_id) = 0;
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) = 0;
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) = 0;
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) = 0;
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) = 0;
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) = 0;
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) = 0;
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) = 0;
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) = 0;
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) = 0;
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) = 0;
+virtual void ProduceTextureCHROMIUM(GLenum target, const GLbyte* mailbox) = 0;
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) = 0;
+virtual void ConsumeTextureCHROMIUM(GLenum target, const GLbyte* mailbox) = 0;
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) = 0;
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) = 0;
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) = 0;
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) = 0;
+virtual void TraceBeginCHROMIUM(const char* name) = 0;
+virtual void TraceEndCHROMIUM() = 0;
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) = 0;
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) = 0;
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) = 0;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() = 0;
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) = 0;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) = 0;
+virtual GLuint InsertSyncPointCHROMIUM() = 0;
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) = 0;
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) = 0;
+virtual void DiscardBackbufferCHROMIUM() = 0;
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) = 0;
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) = 0;
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) = 0;
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_interface_stub.cc b/gpu/command_buffer/client/gles2_interface_stub.cc
new file mode 100644
index 0000000..3f4d7ba
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_interface_stub.h"
+
+namespace gpu {
+namespace gles2 {
+
+GLES2InterfaceStub::GLES2InterfaceStub() {
+}
+
+GLES2InterfaceStub::~GLES2InterfaceStub() {
+}
+
+// Include the auto-generated part of this class. We split this because
+// it means we can easily edit the non-auto generated parts right here in
+// this file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/gles2_interface_stub.h b/gpu/command_buffer/client/gles2_interface_stub.h
new file mode 100644
index 0000000..cf3fb41
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_H_
+
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace gpu {
+namespace gles2 {
+
+// This class is a stub to help with mocks for the GLES2Interface class.
+class GLES2InterfaceStub : public GLES2Interface {
+ public:
+ GLES2InterfaceStub();
+ virtual ~GLES2InterfaceStub();
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_interface_stub_autogen.h"
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_H_
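// Usage sketch (hypothetical test code, not part of this patch): callers
// typically derive from GLES2InterfaceStub and override only the entry
// points a test cares about, leaving every other GLES2Interface method as a
// no-op. The class name and counter below are illustrative assumptions.
class DrawCountingGL : public gpu::gles2::GLES2InterfaceStub {
 public:
  DrawCountingGL() : draw_calls_(0) {}
  virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE {
    ++draw_calls_;  // record the call instead of issuing real GL work
  }
  int draw_calls() const { return draw_calls_; }

 private:
  int draw_calls_;
};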
diff --git a/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/gpu/command_buffer/client/gles2_interface_stub_autogen.h
new file mode 100644
index 0000000..67fc2da
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -0,0 +1,536 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface_stub.h.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) OVERRIDE;
+virtual void AttachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) OVERRIDE;
+virtual void BindBuffer(GLenum target, GLuint buffer) OVERRIDE;
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) OVERRIDE;
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) OVERRIDE;
+virtual void BindTexture(GLenum target, GLuint texture) OVERRIDE;
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void BlendEquation(GLenum mode) OVERRIDE;
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) OVERRIDE;
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) OVERRIDE;
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) OVERRIDE;
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) OVERRIDE;
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) OVERRIDE;
+virtual GLenum CheckFramebufferStatus(GLenum target) OVERRIDE;
+virtual void Clear(GLbitfield mask) OVERRIDE;
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void ClearDepthf(GLclampf depth) OVERRIDE;
+virtual void ClearStencil(GLint s) OVERRIDE;
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) OVERRIDE;
+virtual void CompileShader(GLuint shader) OVERRIDE;
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) OVERRIDE;
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual GLuint CreateProgram() OVERRIDE;
+virtual GLuint CreateShader(GLenum type) OVERRIDE;
+virtual void CullFace(GLenum mode) OVERRIDE;
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) OVERRIDE;
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) OVERRIDE;
+virtual void DeleteProgram(GLuint program) OVERRIDE;
+virtual void DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) OVERRIDE;
+virtual void DeleteShader(GLuint shader) OVERRIDE;
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) OVERRIDE;
+virtual void DepthFunc(GLenum func) OVERRIDE;
+virtual void DepthMask(GLboolean flag) OVERRIDE;
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) OVERRIDE;
+virtual void DetachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void Disable(GLenum cap) OVERRIDE;
+virtual void DisableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE;
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) OVERRIDE;
+virtual void Enable(GLenum cap) OVERRIDE;
+virtual void EnableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void Finish() OVERRIDE;
+virtual void Flush() OVERRIDE;
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) OVERRIDE;
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) OVERRIDE;
+virtual void FrontFace(GLenum mode) OVERRIDE;
+virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE;
+virtual void GenerateMipmap(GLenum target) OVERRIDE;
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) OVERRIDE;
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) OVERRIDE;
+virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE;
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) OVERRIDE;
+virtual GLint GetAttribLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetBooleanv(GLenum pname, GLboolean* params) OVERRIDE;
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLenum GetError() OVERRIDE;
+virtual void GetFloatv(GLenum pname, GLfloat* params) OVERRIDE;
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) OVERRIDE;
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual const GLubyte* GetString(GLenum name) OVERRIDE;
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) OVERRIDE;
+virtual void GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) OVERRIDE;
+virtual GLint GetUniformLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) OVERRIDE;
+virtual void Hint(GLenum target, GLenum mode) OVERRIDE;
+virtual GLboolean IsBuffer(GLuint buffer) OVERRIDE;
+virtual GLboolean IsEnabled(GLenum cap) OVERRIDE;
+virtual GLboolean IsFramebuffer(GLuint framebuffer) OVERRIDE;
+virtual GLboolean IsProgram(GLuint program) OVERRIDE;
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) OVERRIDE;
+virtual GLboolean IsShader(GLuint shader) OVERRIDE;
+virtual GLboolean IsTexture(GLuint texture) OVERRIDE;
+virtual void LineWidth(GLfloat width) OVERRIDE;
+virtual void LinkProgram(GLuint program) OVERRIDE;
+virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE;
+virtual void PolygonOffset(GLfloat factor, GLfloat units) OVERRIDE;
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) OVERRIDE;
+virtual void ReleaseShaderCompiler() OVERRIDE;
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void SampleCoverage(GLclampf value, GLboolean invert) OVERRIDE;
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) OVERRIDE;
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) OVERRIDE;
+virtual void ShallowFinishCHROMIUM() OVERRIDE;
+virtual void ShallowFlushCHROMIUM() OVERRIDE;
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) OVERRIDE;
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) OVERRIDE;
+virtual void StencilMask(GLuint mask) OVERRIDE;
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) OVERRIDE;
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) OVERRIDE;
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) OVERRIDE;
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) OVERRIDE;
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) OVERRIDE;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE;
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) OVERRIDE;
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void Uniform1f(GLint location, GLfloat x) OVERRIDE;
+virtual void Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform1i(GLint location, GLint x) OVERRIDE;
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) OVERRIDE;
+virtual void Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform2i(GLint location, GLint x, GLint y) OVERRIDE;
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) OVERRIDE;
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) OVERRIDE;
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UseProgram(GLuint program) OVERRIDE;
+virtual void ValidateProgram(GLuint program) OVERRIDE;
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) OVERRIDE;
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) OVERRIDE;
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) OVERRIDE;
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) OVERRIDE;
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) OVERRIDE;
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) OVERRIDE;
+virtual GLboolean IsQueryEXT(GLuint id) OVERRIDE;
+virtual void BeginQueryEXT(GLenum target, GLuint id) OVERRIDE;
+virtual void EndQueryEXT(GLenum target) OVERRIDE;
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) OVERRIDE;
+virtual void InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) OVERRIDE;
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) OVERRIDE;
+virtual void PopGroupMarkerEXT() OVERRIDE;
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) OVERRIDE;
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) OVERRIDE;
+virtual GLboolean IsVertexArrayOES(GLuint array) OVERRIDE;
+virtual void BindVertexArrayOES(GLuint array) OVERRIDE;
+virtual void SwapBuffers() OVERRIDE;
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) OVERRIDE;
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) OVERRIDE;
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) OVERRIDE;
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) OVERRIDE;
+virtual void* MapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void UnmapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) OVERRIDE;
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) OVERRIDE;
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) OVERRIDE;
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) OVERRIDE;
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) OVERRIDE;
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() OVERRIDE;
+virtual void RequestExtensionCHROMIUM(const char* extension) OVERRIDE;
+virtual void RateLimitOffscreenContextCHROMIUM() OVERRIDE;
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) OVERRIDE;
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) OVERRIDE;
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) OVERRIDE;
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void DestroyImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) OVERRIDE;
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) OVERRIDE;
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) OVERRIDE;
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) OVERRIDE;
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) OVERRIDE;
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) OVERRIDE;
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) OVERRIDE;
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void TraceBeginCHROMIUM(const char* name) OVERRIDE;
+virtual void TraceEndCHROMIUM() OVERRIDE;
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) OVERRIDE;
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) OVERRIDE;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) OVERRIDE;
+virtual GLuint InsertSyncPointCHROMIUM() OVERRIDE;
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) OVERRIDE;
+virtual void DiscardBackbufferCHROMIUM() OVERRIDE;
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) OVERRIDE;
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) OVERRIDE;
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) OVERRIDE;
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
new file mode 100644
index 0000000..cde303f
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -0,0 +1,863 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface_stub.cc.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
+
+void GLES2InterfaceStub::ActiveTexture(GLenum /* texture */) {
+}
+void GLES2InterfaceStub::AttachShader(GLuint /* program */,
+ GLuint /* shader */) {
+}
+void GLES2InterfaceStub::BindAttribLocation(GLuint /* program */,
+ GLuint /* index */,
+ const char* /* name */) {
+}
+void GLES2InterfaceStub::BindBuffer(GLenum /* target */, GLuint /* buffer */) {
+}
+void GLES2InterfaceStub::BindFramebuffer(GLenum /* target */,
+ GLuint /* framebuffer */) {
+}
+void GLES2InterfaceStub::BindRenderbuffer(GLenum /* target */,
+ GLuint /* renderbuffer */) {
+}
+void GLES2InterfaceStub::BindTexture(GLenum /* target */,
+ GLuint /* texture */) {
+}
+void GLES2InterfaceStub::BlendColor(GLclampf /* red */,
+ GLclampf /* green */,
+ GLclampf /* blue */,
+ GLclampf /* alpha */) {
+}
+void GLES2InterfaceStub::BlendEquation(GLenum /* mode */) {
+}
+void GLES2InterfaceStub::BlendEquationSeparate(GLenum /* modeRGB */,
+ GLenum /* modeAlpha */) {
+}
+void GLES2InterfaceStub::BlendFunc(GLenum /* sfactor */, GLenum /* dfactor */) {
+}
+void GLES2InterfaceStub::BlendFuncSeparate(GLenum /* srcRGB */,
+ GLenum /* dstRGB */,
+ GLenum /* srcAlpha */,
+ GLenum /* dstAlpha */) {
+}
+void GLES2InterfaceStub::BufferData(GLenum /* target */,
+ GLsizeiptr /* size */,
+ const void* /* data */,
+ GLenum /* usage */) {
+}
+void GLES2InterfaceStub::BufferSubData(GLenum /* target */,
+ GLintptr /* offset */,
+ GLsizeiptr /* size */,
+ const void* /* data */) {
+}
+GLenum GLES2InterfaceStub::CheckFramebufferStatus(GLenum /* target */) {
+ return 0;
+}
+void GLES2InterfaceStub::Clear(GLbitfield /* mask */) {
+}
+void GLES2InterfaceStub::ClearColor(GLclampf /* red */,
+ GLclampf /* green */,
+ GLclampf /* blue */,
+ GLclampf /* alpha */) {
+}
+void GLES2InterfaceStub::ClearDepthf(GLclampf /* depth */) {
+}
+void GLES2InterfaceStub::ClearStencil(GLint /* s */) {
+}
+void GLES2InterfaceStub::ColorMask(GLboolean /* red */,
+ GLboolean /* green */,
+ GLboolean /* blue */,
+ GLboolean /* alpha */) {
+}
+void GLES2InterfaceStub::CompileShader(GLuint /* shader */) {
+}
+void GLES2InterfaceStub::CompressedTexImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */,
+ GLsizei /* imageSize */,
+ const void* /* data */) {
+}
+void GLES2InterfaceStub::CompressedTexSubImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLsizei /* imageSize */,
+ const void* /* data */) {
+}
+void GLES2InterfaceStub::CopyTexImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLenum /* internalformat */,
+ GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */) {
+}
+void GLES2InterfaceStub::CopyTexSubImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+GLuint GLES2InterfaceStub::CreateProgram() {
+ return 0;
+}
+GLuint GLES2InterfaceStub::CreateShader(GLenum /* type */) {
+ return 0;
+}
+void GLES2InterfaceStub::CullFace(GLenum /* mode */) {
+}
+void GLES2InterfaceStub::DeleteBuffers(GLsizei /* n */,
+ const GLuint* /* buffers */) {
+}
+void GLES2InterfaceStub::DeleteFramebuffers(GLsizei /* n */,
+ const GLuint* /* framebuffers */) {
+}
+void GLES2InterfaceStub::DeleteProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::DeleteRenderbuffers(
+ GLsizei /* n */,
+ const GLuint* /* renderbuffers */) {
+}
+void GLES2InterfaceStub::DeleteShader(GLuint /* shader */) {
+}
+void GLES2InterfaceStub::DeleteTextures(GLsizei /* n */,
+ const GLuint* /* textures */) {
+}
+void GLES2InterfaceStub::DepthFunc(GLenum /* func */) {
+}
+void GLES2InterfaceStub::DepthMask(GLboolean /* flag */) {
+}
+void GLES2InterfaceStub::DepthRangef(GLclampf /* zNear */,
+ GLclampf /* zFar */) {
+}
+void GLES2InterfaceStub::DetachShader(GLuint /* program */,
+ GLuint /* shader */) {
+}
+void GLES2InterfaceStub::Disable(GLenum /* cap */) {
+}
+void GLES2InterfaceStub::DisableVertexAttribArray(GLuint /* index */) {
+}
+void GLES2InterfaceStub::DrawArrays(GLenum /* mode */,
+ GLint /* first */,
+ GLsizei /* count */) {
+}
+void GLES2InterfaceStub::DrawElements(GLenum /* mode */,
+ GLsizei /* count */,
+ GLenum /* type */,
+ const void* /* indices */) {
+}
+void GLES2InterfaceStub::Enable(GLenum /* cap */) {
+}
+void GLES2InterfaceStub::EnableVertexAttribArray(GLuint /* index */) {
+}
+void GLES2InterfaceStub::Finish() {
+}
+void GLES2InterfaceStub::Flush() {
+}
+void GLES2InterfaceStub::FramebufferRenderbuffer(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* renderbuffertarget */,
+ GLuint /* renderbuffer */) {
+}
+void GLES2InterfaceStub::FramebufferTexture2D(GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* textarget */,
+ GLuint /* texture */,
+ GLint /* level */) {
+}
+void GLES2InterfaceStub::FrontFace(GLenum /* mode */) {
+}
+void GLES2InterfaceStub::GenBuffers(GLsizei /* n */, GLuint* /* buffers */) {
+}
+void GLES2InterfaceStub::GenerateMipmap(GLenum /* target */) {
+}
+void GLES2InterfaceStub::GenFramebuffers(GLsizei /* n */,
+ GLuint* /* framebuffers */) {
+}
+void GLES2InterfaceStub::GenRenderbuffers(GLsizei /* n */,
+ GLuint* /* renderbuffers */) {
+}
+void GLES2InterfaceStub::GenTextures(GLsizei /* n */, GLuint* /* textures */) {
+}
+void GLES2InterfaceStub::GetActiveAttrib(GLuint /* program */,
+ GLuint /* index */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ GLint* /* size */,
+ GLenum* /* type */,
+ char* /* name */) {
+}
+void GLES2InterfaceStub::GetActiveUniform(GLuint /* program */,
+ GLuint /* index */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ GLint* /* size */,
+ GLenum* /* type */,
+ char* /* name */) {
+}
+void GLES2InterfaceStub::GetAttachedShaders(GLuint /* program */,
+ GLsizei /* maxcount */,
+ GLsizei* /* count */,
+ GLuint* /* shaders */) {
+}
+GLint GLES2InterfaceStub::GetAttribLocation(GLuint /* program */,
+ const char* /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetBooleanv(GLenum /* pname */,
+ GLboolean* /* params */) {
+}
+void GLES2InterfaceStub::GetBufferParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+GLenum GLES2InterfaceStub::GetError() {
+ return 0;
+}
+void GLES2InterfaceStub::GetFloatv(GLenum /* pname */, GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetFramebufferAttachmentParameteriv(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetIntegerv(GLenum /* pname */, GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetProgramiv(GLuint /* program */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetProgramInfoLog(GLuint /* program */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* infolog */) {
+}
+void GLES2InterfaceStub::GetRenderbufferParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetShaderiv(GLuint /* shader */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetShaderInfoLog(GLuint /* shader */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* infolog */) {
+}
+void GLES2InterfaceStub::GetShaderPrecisionFormat(GLenum /* shadertype */,
+ GLenum /* precisiontype */,
+ GLint* /* range */,
+ GLint* /* precision */) {
+}
+void GLES2InterfaceStub::GetShaderSource(GLuint /* shader */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* source */) {
+}
+const GLubyte* GLES2InterfaceStub::GetString(GLenum /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetTexParameterfv(GLenum /* target */,
+ GLenum /* pname */,
+ GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetTexParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetUniformfv(GLuint /* program */,
+ GLint /* location */,
+ GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetUniformiv(GLuint /* program */,
+ GLint /* location */,
+ GLint* /* params */) {
+}
+GLint GLES2InterfaceStub::GetUniformLocation(GLuint /* program */,
+ const char* /* name */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetVertexAttribfv(GLuint /* index */,
+ GLenum /* pname */,
+ GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::GetVertexAttribiv(GLuint /* index */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetVertexAttribPointerv(GLuint /* index */,
+ GLenum /* pname */,
+ void** /* pointer */) {
+}
+void GLES2InterfaceStub::Hint(GLenum /* target */, GLenum /* mode */) {
+}
+GLboolean GLES2InterfaceStub::IsBuffer(GLuint /* buffer */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsEnabled(GLenum /* cap */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsFramebuffer(GLuint /* framebuffer */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsProgram(GLuint /* program */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsRenderbuffer(GLuint /* renderbuffer */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsShader(GLuint /* shader */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::IsTexture(GLuint /* texture */) {
+ return 0;
+}
+void GLES2InterfaceStub::LineWidth(GLfloat /* width */) {
+}
+void GLES2InterfaceStub::LinkProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::PixelStorei(GLenum /* pname */, GLint /* param */) {
+}
+void GLES2InterfaceStub::PolygonOffset(GLfloat /* factor */,
+ GLfloat /* units */) {
+}
+void GLES2InterfaceStub::ReadPixels(GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ void* /* pixels */) {
+}
+void GLES2InterfaceStub::ReleaseShaderCompiler() {
+}
+void GLES2InterfaceStub::RenderbufferStorage(GLenum /* target */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::SampleCoverage(GLclampf /* value */,
+ GLboolean /* invert */) {
+}
+void GLES2InterfaceStub::Scissor(GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::ShaderBinary(GLsizei /* n */,
+ const GLuint* /* shaders */,
+ GLenum /* binaryformat */,
+ const void* /* binary */,
+ GLsizei /* length */) {
+}
+void GLES2InterfaceStub::ShaderSource(GLuint /* shader */,
+ GLsizei /* count */,
+ const GLchar* const* /* str */,
+ const GLint* /* length */) {
+}
+void GLES2InterfaceStub::ShallowFinishCHROMIUM() {
+}
+void GLES2InterfaceStub::ShallowFlushCHROMIUM() {
+}
+void GLES2InterfaceStub::StencilFunc(GLenum /* func */,
+ GLint /* ref */,
+ GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilFuncSeparate(GLenum /* face */,
+ GLenum /* func */,
+ GLint /* ref */,
+ GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilMask(GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilMaskSeparate(GLenum /* face */,
+ GLuint /* mask */) {
+}
+void GLES2InterfaceStub::StencilOp(GLenum /* fail */,
+ GLenum /* zfail */,
+ GLenum /* zpass */) {
+}
+void GLES2InterfaceStub::StencilOpSeparate(GLenum /* face */,
+ GLenum /* fail */,
+ GLenum /* zfail */,
+ GLenum /* zpass */) {
+}
+void GLES2InterfaceStub::TexImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* pixels */) {
+}
+void GLES2InterfaceStub::TexParameterf(GLenum /* target */,
+ GLenum /* pname */,
+ GLfloat /* param */) {
+}
+void GLES2InterfaceStub::TexParameterfv(GLenum /* target */,
+ GLenum /* pname */,
+ const GLfloat* /* params */) {
+}
+void GLES2InterfaceStub::TexParameteri(GLenum /* target */,
+ GLenum /* pname */,
+ GLint /* param */) {
+}
+void GLES2InterfaceStub::TexParameteriv(GLenum /* target */,
+ GLenum /* pname */,
+ const GLint* /* params */) {
+}
+void GLES2InterfaceStub::TexSubImage2D(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* pixels */) {
+}
+void GLES2InterfaceStub::Uniform1f(GLint /* location */, GLfloat /* x */) {
+}
+void GLES2InterfaceStub::Uniform1fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform1i(GLint /* location */, GLint /* x */) {
+}
+void GLES2InterfaceStub::Uniform1iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::Uniform2f(GLint /* location */,
+ GLfloat /* x */,
+ GLfloat /* y */) {
+}
+void GLES2InterfaceStub::Uniform2fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform2i(GLint /* location */,
+ GLint /* x */,
+ GLint /* y */) {
+}
+void GLES2InterfaceStub::Uniform2iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::Uniform3f(GLint /* location */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */) {
+}
+void GLES2InterfaceStub::Uniform3fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform3i(GLint /* location */,
+ GLint /* x */,
+ GLint /* y */,
+ GLint /* z */) {
+}
+void GLES2InterfaceStub::Uniform3iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::Uniform4f(GLint /* location */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */,
+ GLfloat /* w */) {
+}
+void GLES2InterfaceStub::Uniform4fv(GLint /* location */,
+ GLsizei /* count */,
+ const GLfloat* /* v */) {
+}
+void GLES2InterfaceStub::Uniform4i(GLint /* location */,
+ GLint /* x */,
+ GLint /* y */,
+ GLint /* z */,
+ GLint /* w */) {
+}
+void GLES2InterfaceStub::Uniform4iv(GLint /* location */,
+ GLsizei /* count */,
+ const GLint* /* v */) {
+}
+void GLES2InterfaceStub::UniformMatrix2fv(GLint /* location */,
+ GLsizei /* count */,
+ GLboolean /* transpose */,
+ const GLfloat* /* value */) {
+}
+void GLES2InterfaceStub::UniformMatrix3fv(GLint /* location */,
+ GLsizei /* count */,
+ GLboolean /* transpose */,
+ const GLfloat* /* value */) {
+}
+void GLES2InterfaceStub::UniformMatrix4fv(GLint /* location */,
+ GLsizei /* count */,
+ GLboolean /* transpose */,
+ const GLfloat* /* value */) {
+}
+void GLES2InterfaceStub::UseProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::ValidateProgram(GLuint /* program */) {
+}
+void GLES2InterfaceStub::VertexAttrib1f(GLuint /* indx */, GLfloat /* x */) {
+}
+void GLES2InterfaceStub::VertexAttrib1fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttrib2f(GLuint /* indx */,
+ GLfloat /* x */,
+ GLfloat /* y */) {
+}
+void GLES2InterfaceStub::VertexAttrib2fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttrib3f(GLuint /* indx */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */) {
+}
+void GLES2InterfaceStub::VertexAttrib3fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttrib4f(GLuint /* indx */,
+ GLfloat /* x */,
+ GLfloat /* y */,
+ GLfloat /* z */,
+ GLfloat /* w */) {
+}
+void GLES2InterfaceStub::VertexAttrib4fv(GLuint /* indx */,
+ const GLfloat* /* values */) {
+}
+void GLES2InterfaceStub::VertexAttribPointer(GLuint /* indx */,
+ GLint /* size */,
+ GLenum /* type */,
+ GLboolean /* normalized */,
+ GLsizei /* stride */,
+ const void* /* ptr */) {
+}
+void GLES2InterfaceStub::Viewport(GLint /* x */,
+ GLint /* y */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::BlitFramebufferCHROMIUM(GLint /* srcX0 */,
+ GLint /* srcY0 */,
+ GLint /* srcX1 */,
+ GLint /* srcY1 */,
+ GLint /* dstX0 */,
+ GLint /* dstY0 */,
+ GLint /* dstX1 */,
+ GLint /* dstY1 */,
+ GLbitfield /* mask */,
+ GLenum /* filter */) {
+}
+void GLES2InterfaceStub::RenderbufferStorageMultisampleCHROMIUM(
+ GLenum /* target */,
+ GLsizei /* samples */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::RenderbufferStorageMultisampleEXT(
+ GLenum /* target */,
+ GLsizei /* samples */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::FramebufferTexture2DMultisampleEXT(
+ GLenum /* target */,
+ GLenum /* attachment */,
+ GLenum /* textarget */,
+ GLuint /* texture */,
+ GLint /* level */,
+ GLsizei /* samples */) {
+}
+void GLES2InterfaceStub::TexStorage2DEXT(GLenum /* target */,
+ GLsizei /* levels */,
+ GLenum /* internalFormat */,
+ GLsizei /* width */,
+ GLsizei /* height */) {
+}
+void GLES2InterfaceStub::GenQueriesEXT(GLsizei /* n */, GLuint* /* queries */) {
+}
+void GLES2InterfaceStub::DeleteQueriesEXT(GLsizei /* n */,
+ const GLuint* /* queries */) {
+}
+GLboolean GLES2InterfaceStub::IsQueryEXT(GLuint /* id */) {
+ return 0;
+}
+void GLES2InterfaceStub::BeginQueryEXT(GLenum /* target */, GLuint /* id */) {
+}
+void GLES2InterfaceStub::EndQueryEXT(GLenum /* target */) {
+}
+void GLES2InterfaceStub::GetQueryivEXT(GLenum /* target */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+void GLES2InterfaceStub::GetQueryObjectuivEXT(GLuint /* id */,
+ GLenum /* pname */,
+ GLuint* /* params */) {
+}
+void GLES2InterfaceStub::InsertEventMarkerEXT(GLsizei /* length */,
+ const GLchar* /* marker */) {
+}
+void GLES2InterfaceStub::PushGroupMarkerEXT(GLsizei /* length */,
+ const GLchar* /* marker */) {
+}
+void GLES2InterfaceStub::PopGroupMarkerEXT() {
+}
+void GLES2InterfaceStub::GenVertexArraysOES(GLsizei /* n */,
+ GLuint* /* arrays */) {
+}
+void GLES2InterfaceStub::DeleteVertexArraysOES(GLsizei /* n */,
+ const GLuint* /* arrays */) {
+}
+GLboolean GLES2InterfaceStub::IsVertexArrayOES(GLuint /* array */) {
+ return 0;
+}
+void GLES2InterfaceStub::BindVertexArrayOES(GLuint /* array */) {
+}
+void GLES2InterfaceStub::SwapBuffers() {
+}
+GLuint GLES2InterfaceStub::GetMaxValueInBufferCHROMIUM(GLuint /* buffer_id */,
+ GLsizei /* count */,
+ GLenum /* type */,
+ GLuint /* offset */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::EnableFeatureCHROMIUM(const char* /* feature */) {
+ return 0;
+}
+void* GLES2InterfaceStub::MapBufferCHROMIUM(GLuint /* target */,
+ GLenum /* access */) {
+ return 0;
+}
+GLboolean GLES2InterfaceStub::UnmapBufferCHROMIUM(GLuint /* target */) {
+ return 0;
+}
+void* GLES2InterfaceStub::MapImageCHROMIUM(GLuint /* image_id */) {
+ return 0;
+}
+void GLES2InterfaceStub::UnmapImageCHROMIUM(GLuint /* image_id */) {
+}
+void* GLES2InterfaceStub::MapBufferSubDataCHROMIUM(GLuint /* target */,
+ GLintptr /* offset */,
+ GLsizeiptr /* size */,
+ GLenum /* access */) {
+ return 0;
+}
+void GLES2InterfaceStub::UnmapBufferSubDataCHROMIUM(const void* /* mem */) {
+}
+void* GLES2InterfaceStub::MapTexSubImage2DCHROMIUM(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ GLenum /* access */) {
+ return 0;
+}
+void GLES2InterfaceStub::UnmapTexSubImage2DCHROMIUM(const void* /* mem */) {
+}
+void GLES2InterfaceStub::ResizeCHROMIUM(GLuint /* width */,
+ GLuint /* height */,
+ GLfloat /* scale_factor */) {
+}
+const GLchar* GLES2InterfaceStub::GetRequestableExtensionsCHROMIUM() {
+ return 0;
+}
+void GLES2InterfaceStub::RequestExtensionCHROMIUM(const char* /* extension */) {
+}
+void GLES2InterfaceStub::RateLimitOffscreenContextCHROMIUM() {
+}
+void GLES2InterfaceStub::GetMultipleIntegervCHROMIUM(const GLenum* /* pnames */,
+ GLuint /* count */,
+ GLint* /* results */,
+ GLsizeiptr /* size */) {
+}
+void GLES2InterfaceStub::GetProgramInfoCHROMIUM(GLuint /* program */,
+ GLsizei /* bufsize */,
+ GLsizei* /* size */,
+ void* /* info */) {
+}
+GLuint GLES2InterfaceStub::CreateStreamTextureCHROMIUM(GLuint /* texture */) {
+ return 0;
+}
+GLuint GLES2InterfaceStub::CreateImageCHROMIUM(GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* internalformat */,
+ GLenum /* usage */) {
+ return 0;
+}
+void GLES2InterfaceStub::DestroyImageCHROMIUM(GLuint /* image_id */) {
+}
+void GLES2InterfaceStub::GetImageParameterivCHROMIUM(GLuint /* image_id */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+}
+GLuint GLES2InterfaceStub::CreateGpuMemoryBufferImageCHROMIUM(
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* internalformat */,
+ GLenum /* usage */) {
+ return 0;
+}
+void GLES2InterfaceStub::GetTranslatedShaderSourceANGLE(GLuint /* shader */,
+ GLsizei /* bufsize */,
+ GLsizei* /* length */,
+ char* /* source */) {
+}
+void GLES2InterfaceStub::PostSubBufferCHROMIUM(GLint /* x */,
+ GLint /* y */,
+ GLint /* width */,
+ GLint /* height */) {
+}
+void GLES2InterfaceStub::TexImageIOSurface2DCHROMIUM(GLenum /* target */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLuint /* ioSurfaceId */,
+ GLuint /* plane */) {
+}
+void GLES2InterfaceStub::CopyTextureCHROMIUM(GLenum /* target */,
+ GLenum /* source_id */,
+ GLenum /* dest_id */,
+ GLint /* level */,
+ GLint /* internalformat */,
+ GLenum /* dest_type */) {
+}
+void GLES2InterfaceStub::DrawArraysInstancedANGLE(GLenum /* mode */,
+ GLint /* first */,
+ GLsizei /* count */,
+ GLsizei /* primcount */) {
+}
+void GLES2InterfaceStub::DrawElementsInstancedANGLE(GLenum /* mode */,
+ GLsizei /* count */,
+ GLenum /* type */,
+ const void* /* indices */,
+ GLsizei /* primcount */) {
+}
+void GLES2InterfaceStub::VertexAttribDivisorANGLE(GLuint /* index */,
+ GLuint /* divisor */) {
+}
+void GLES2InterfaceStub::GenMailboxCHROMIUM(GLbyte* /* mailbox */) {
+}
+void GLES2InterfaceStub::ProduceTextureCHROMIUM(GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+}
+void GLES2InterfaceStub::ProduceTextureDirectCHROMIUM(
+ GLuint /* texture */,
+ GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+}
+void GLES2InterfaceStub::ConsumeTextureCHROMIUM(GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+}
+GLuint GLES2InterfaceStub::CreateAndConsumeTextureCHROMIUM(
+ GLenum /* target */,
+ const GLbyte* /* mailbox */) {
+ return 0;
+}
+void GLES2InterfaceStub::BindUniformLocationCHROMIUM(GLuint /* program */,
+ GLint /* location */,
+ const char* /* name */) {
+}
+void GLES2InterfaceStub::BindTexImage2DCHROMIUM(GLenum /* target */,
+ GLint /* imageId */) {
+}
+void GLES2InterfaceStub::ReleaseTexImage2DCHROMIUM(GLenum /* target */,
+ GLint /* imageId */) {
+}
+void GLES2InterfaceStub::TraceBeginCHROMIUM(const char* /* name */) {
+}
+void GLES2InterfaceStub::TraceEndCHROMIUM() {
+}
+void GLES2InterfaceStub::AsyncTexSubImage2DCHROMIUM(GLenum /* target */,
+ GLint /* level */,
+ GLint /* xoffset */,
+ GLint /* yoffset */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* data */) {
+}
+void GLES2InterfaceStub::AsyncTexImage2DCHROMIUM(GLenum /* target */,
+ GLint /* level */,
+ GLenum /* internalformat */,
+ GLsizei /* width */,
+ GLsizei /* height */,
+ GLint /* border */,
+ GLenum /* format */,
+ GLenum /* type */,
+ const void* /* pixels */) {
+}
+void GLES2InterfaceStub::WaitAsyncTexImage2DCHROMIUM(GLenum /* target */) {
+}
+void GLES2InterfaceStub::WaitAllAsyncTexImage2DCHROMIUM() {
+}
+void GLES2InterfaceStub::DiscardFramebufferEXT(
+ GLenum /* target */,
+ GLsizei /* count */,
+ const GLenum* /* attachments */) {
+}
+void GLES2InterfaceStub::LoseContextCHROMIUM(GLenum /* current */,
+ GLenum /* other */) {
+}
+GLuint GLES2InterfaceStub::InsertSyncPointCHROMIUM() {
+ return 0;
+}
+void GLES2InterfaceStub::WaitSyncPointCHROMIUM(GLuint /* sync_point */) {
+}
+void GLES2InterfaceStub::DrawBuffersEXT(GLsizei /* count */,
+ const GLenum* /* bufs */) {
+}
+void GLES2InterfaceStub::DiscardBackbufferCHROMIUM() {
+}
+void GLES2InterfaceStub::ScheduleOverlayPlaneCHROMIUM(
+ GLint /* plane_z_order */,
+ GLenum /* plane_transform */,
+ GLuint /* overlay_texture_id */,
+ GLint /* bounds_x */,
+ GLint /* bounds_y */,
+ GLint /* bounds_width */,
+ GLint /* bounds_height */,
+ GLfloat /* uv_x */,
+ GLfloat /* uv_y */,
+ GLfloat /* uv_width */,
+ GLfloat /* uv_height */) {
+}
+void GLES2InterfaceStub::MatrixLoadfCHROMIUM(GLenum /* matrixMode */,
+ const GLfloat* /* m */) {
+}
+void GLES2InterfaceStub::MatrixLoadIdentityCHROMIUM(GLenum /* matrixMode */) {
+}
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_lib.cc b/gpu/command_buffer/client/gles2_lib.cc
new file mode 100644
index 0000000..6c8b0ea
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_lib.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_lib.h"
+#include <string.h>
+#include "gpu/command_buffer/common/thread_local.h"
+
+namespace gles2 {
+
+// This is defined in gles2_c_lib_autogen.h
+extern "C" {
+extern const NameToFunc g_gles2_function_table[];
+}
+
+// TODO(kbr): the use of this anonymous namespace core dumps the
+// linker on Mac OS X 10.6 when the symbol ordering file is used
+// namespace {
+static gpu::ThreadLocalKey g_gl_context_key;
+// } // namespace anonymous
+
+void Initialize() {
+ g_gl_context_key = gpu::ThreadLocalAlloc();
+}
+
+void Terminate() {
+ gpu::ThreadLocalFree(g_gl_context_key);
+ g_gl_context_key = 0;
+}
+
+gpu::gles2::GLES2Interface* GetGLContext() {
+ return static_cast<gpu::gles2::GLES2Interface*>(
+ gpu::ThreadLocalGetValue(g_gl_context_key));
+}
+
+void SetGLContext(gpu::gles2::GLES2Interface* context) {
+ gpu::ThreadLocalSetValue(g_gl_context_key, context);
+}
+
+GLES2FunctionPointer GetGLFunctionPointer(const char* name) {
+ for (const NameToFunc* named_function = g_gles2_function_table;
+ named_function->name;
+ ++named_function) {
+ if (!strcmp(name, named_function->name)) {
+ return named_function->func;
+ }
+ }
+ return NULL;
+}
+
+} // namespace gles2
+
+
+
+
diff --git a/gpu/command_buffer/client/gles2_lib.h b/gpu/command_buffer/client/gles2_lib.h
new file mode 100644
index 0000000..b90a2d8
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_lib.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These functions emulate GLES2 over command buffers.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_LIB_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_LIB_H_
+
+#include "gpu/command_buffer/client/gles2_c_lib_export.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace gles2 {
+
+typedef void (*GLES2FunctionPointer)(void);
+
+struct NameToFunc {
+ const char* name;
+ gles2::GLES2FunctionPointer func;
+};
+
+// Initialize the GLES2 library.
+GLES2_C_LIB_EXPORT void Initialize();
+
+// Terminate the GLES2 library.
+GLES2_C_LIB_EXPORT void Terminate();
+
+// Get the current GL context.
+GLES2_C_LIB_EXPORT gpu::gles2::GLES2Interface* GetGLContext();
+
+// Set the current GL context.
+GLES2_C_LIB_EXPORT void SetGLContext(gpu::gles2::GLES2Interface* impl);
+
+GLES2_C_LIB_EXPORT GLES2FunctionPointer GetGLFunctionPointer(const char* name);
+
+} // namespace gles2
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_LIB_H_
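// Usage sketch (assumed caller code, not part of this patch): a process
// initializes the library once, each thread installs its GLES2Interface
// before making GL calls, and the C entry points generated in
// gles2_c_lib_autogen.h dispatch through GetGLContext(). "implementation"
// below stands in for a real GLES2Interface instance owned by the caller.
gles2::Initialize();                  // once per process
gles2::SetGLContext(implementation);  // per thread, before any GL call
gles2::GetGLContext()->Clear(GL_COLOR_BUFFER_BIT);
gles2::SetGLContext(NULL);            // detach before the thread exits
gles2::Terminate();                   // once at process shutdown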
diff --git a/gpu/command_buffer/client/gles2_trace_implementation.cc b/gpu/command_buffer/client/gles2_trace_implementation.cc
new file mode 100644
index 0000000..234f243
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gles2_trace_implementation.h"
+#include "gpu/command_buffer/common/trace_event.h"
+
+namespace gpu {
+namespace gles2 {
+
+GLES2TraceImplementation::GLES2TraceImplementation(GLES2Interface* gl)
+ : gl_(gl) {
+}
+
+GLES2TraceImplementation::~GLES2TraceImplementation() {
+}
+
+// Include the auto-generated part of this file. We split this because it means
+// we can easily edit the non-auto generated parts right here in this file
+// instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h"
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/client/gles2_trace_implementation.h b/gpu/command_buffer/client/gles2_trace_implementation.h
new file mode 100644
index 0000000..c215231
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_H_
+
+#include "base/compiler_specific.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/client/gles2_interface.h"
+
+namespace gpu {
+namespace gles2 {
+
+// GLES2TraceImplementation calls TRACE for every GL call.
+class GLES2_IMPL_EXPORT GLES2TraceImplementation
+ : NON_EXPORTED_BASE(public GLES2Interface) {
+ public:
+ explicit GLES2TraceImplementation(GLES2Interface* gl);
+ virtual ~GLES2TraceImplementation();
+
+ // Include the auto-generated part of this class. We split this because
+ // it means we can easily edit the non-auto generated parts right here in
+ // this file instead of having to edit some template or the code generator.
+ #include "gpu/command_buffer/client/gles2_trace_implementation_autogen.h"
+
+ private:
+ GLES2Interface* gl_;
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_H_
+
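// Shape of the generated forwarding methods (a sketch; the exact trace macro
// used by gles2_trace_implementation_impl_autogen.h may differ): each override
// emits a trace event and then delegates to the wrapped interface held in gl_.
void GLES2TraceImplementation::ActiveTexture(GLenum texture) {
  TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ActiveTexture");
  gl_->ActiveTexture(texture);
}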
diff --git a/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
new file mode 100644
index 0000000..1083251
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -0,0 +1,536 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_trace_implementation.h
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
+
+virtual void ActiveTexture(GLenum texture) OVERRIDE;
+virtual void AttachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) OVERRIDE;
+virtual void BindBuffer(GLenum target, GLuint buffer) OVERRIDE;
+virtual void BindFramebuffer(GLenum target, GLuint framebuffer) OVERRIDE;
+virtual void BindRenderbuffer(GLenum target, GLuint renderbuffer) OVERRIDE;
+virtual void BindTexture(GLenum target, GLuint texture) OVERRIDE;
+virtual void BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void BlendEquation(GLenum mode) OVERRIDE;
+virtual void BlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha) OVERRIDE;
+virtual void BlendFunc(GLenum sfactor, GLenum dfactor) OVERRIDE;
+virtual void BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) OVERRIDE;
+virtual void BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) OVERRIDE;
+virtual void BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) OVERRIDE;
+virtual GLenum CheckFramebufferStatus(GLenum target) OVERRIDE;
+virtual void Clear(GLbitfield mask) OVERRIDE;
+virtual void ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) OVERRIDE;
+virtual void ClearDepthf(GLclampf depth) OVERRIDE;
+virtual void ClearStencil(GLint s) OVERRIDE;
+virtual void ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) OVERRIDE;
+virtual void CompileShader(GLuint shader) OVERRIDE;
+virtual void CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) OVERRIDE;
+virtual void CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) OVERRIDE;
+virtual void CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual GLuint CreateProgram() OVERRIDE;
+virtual GLuint CreateShader(GLenum type) OVERRIDE;
+virtual void CullFace(GLenum mode) OVERRIDE;
+virtual void DeleteBuffers(GLsizei n, const GLuint* buffers) OVERRIDE;
+virtual void DeleteFramebuffers(GLsizei n, const GLuint* framebuffers) OVERRIDE;
+virtual void DeleteProgram(GLuint program) OVERRIDE;
+virtual void DeleteRenderbuffers(GLsizei n,
+ const GLuint* renderbuffers) OVERRIDE;
+virtual void DeleteShader(GLuint shader) OVERRIDE;
+virtual void DeleteTextures(GLsizei n, const GLuint* textures) OVERRIDE;
+virtual void DepthFunc(GLenum func) OVERRIDE;
+virtual void DepthMask(GLboolean flag) OVERRIDE;
+virtual void DepthRangef(GLclampf zNear, GLclampf zFar) OVERRIDE;
+virtual void DetachShader(GLuint program, GLuint shader) OVERRIDE;
+virtual void Disable(GLenum cap) OVERRIDE;
+virtual void DisableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void DrawArrays(GLenum mode, GLint first, GLsizei count) OVERRIDE;
+virtual void DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) OVERRIDE;
+virtual void Enable(GLenum cap) OVERRIDE;
+virtual void EnableVertexAttribArray(GLuint index) OVERRIDE;
+virtual void Finish() OVERRIDE;
+virtual void Flush() OVERRIDE;
+virtual void FramebufferRenderbuffer(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) OVERRIDE;
+virtual void FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) OVERRIDE;
+virtual void FrontFace(GLenum mode) OVERRIDE;
+virtual void GenBuffers(GLsizei n, GLuint* buffers) OVERRIDE;
+virtual void GenerateMipmap(GLenum target) OVERRIDE;
+virtual void GenFramebuffers(GLsizei n, GLuint* framebuffers) OVERRIDE;
+virtual void GenRenderbuffers(GLsizei n, GLuint* renderbuffers) OVERRIDE;
+virtual void GenTextures(GLsizei n, GLuint* textures) OVERRIDE;
+virtual void GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+virtual void GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) OVERRIDE;
+virtual GLint GetAttribLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetBooleanv(GLenum pname, GLboolean* params) OVERRIDE;
+virtual void GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLenum GetError() OVERRIDE;
+virtual void GetFloatv(GLenum pname, GLfloat* params) OVERRIDE;
+virtual void GetFramebufferAttachmentParameteriv(GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetIntegerv(GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramiv(GLuint program, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetShaderiv(GLuint shader, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) OVERRIDE;
+virtual void GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) OVERRIDE;
+virtual void GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual const GLubyte* GetString(GLenum name) OVERRIDE;
+virtual void GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) OVERRIDE;
+virtual void GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) OVERRIDE;
+virtual GLint GetUniformLocation(GLuint program, const char* name) OVERRIDE;
+virtual void GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ GLfloat* params) OVERRIDE;
+virtual void GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual void GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) OVERRIDE;
+virtual void Hint(GLenum target, GLenum mode) OVERRIDE;
+virtual GLboolean IsBuffer(GLuint buffer) OVERRIDE;
+virtual GLboolean IsEnabled(GLenum cap) OVERRIDE;
+virtual GLboolean IsFramebuffer(GLuint framebuffer) OVERRIDE;
+virtual GLboolean IsProgram(GLuint program) OVERRIDE;
+virtual GLboolean IsRenderbuffer(GLuint renderbuffer) OVERRIDE;
+virtual GLboolean IsShader(GLuint shader) OVERRIDE;
+virtual GLboolean IsTexture(GLuint texture) OVERRIDE;
+virtual void LineWidth(GLfloat width) OVERRIDE;
+virtual void LinkProgram(GLuint program) OVERRIDE;
+virtual void PixelStorei(GLenum pname, GLint param) OVERRIDE;
+virtual void PolygonOffset(GLfloat factor, GLfloat units) OVERRIDE;
+virtual void ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) OVERRIDE;
+virtual void ReleaseShaderCompiler() OVERRIDE;
+virtual void RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void SampleCoverage(GLclampf value, GLboolean invert) OVERRIDE;
+virtual void Scissor(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) OVERRIDE;
+virtual void ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) OVERRIDE;
+virtual void ShallowFinishCHROMIUM() OVERRIDE;
+virtual void ShallowFlushCHROMIUM() OVERRIDE;
+virtual void StencilFunc(GLenum func, GLint ref, GLuint mask) OVERRIDE;
+virtual void StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) OVERRIDE;
+virtual void StencilMask(GLuint mask) OVERRIDE;
+virtual void StencilMaskSeparate(GLenum face, GLuint mask) OVERRIDE;
+virtual void StencilOp(GLenum fail, GLenum zfail, GLenum zpass) OVERRIDE;
+virtual void StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) OVERRIDE;
+virtual void TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void TexParameterf(GLenum target, GLenum pname, GLfloat param) OVERRIDE;
+virtual void TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) OVERRIDE;
+virtual void TexParameteri(GLenum target, GLenum pname, GLint param) OVERRIDE;
+virtual void TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) OVERRIDE;
+virtual void TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void Uniform1f(GLint location, GLfloat x) OVERRIDE;
+virtual void Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform1i(GLint location, GLint x) OVERRIDE;
+virtual void Uniform1iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform2f(GLint location, GLfloat x, GLfloat y) OVERRIDE;
+virtual void Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform2i(GLint location, GLint x, GLint y) OVERRIDE;
+virtual void Uniform2iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform3i(GLint location, GLint x, GLint y, GLint z) OVERRIDE;
+virtual void Uniform3iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) OVERRIDE;
+virtual void Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) OVERRIDE;
+virtual void Uniform4iv(GLint location, GLsizei count, const GLint* v) OVERRIDE;
+virtual void UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) OVERRIDE;
+virtual void UseProgram(GLuint program) OVERRIDE;
+virtual void ValidateProgram(GLuint program) OVERRIDE;
+virtual void VertexAttrib1f(GLuint indx, GLfloat x) OVERRIDE;
+virtual void VertexAttrib1fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y) OVERRIDE;
+virtual void VertexAttrib2fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) OVERRIDE;
+virtual void VertexAttrib3fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) OVERRIDE;
+virtual void VertexAttrib4fv(GLuint indx, const GLfloat* values) OVERRIDE;
+virtual void VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) OVERRIDE;
+virtual void Viewport(GLint x, GLint y, GLsizei width, GLsizei height) OVERRIDE;
+virtual void BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) OVERRIDE;
+virtual void RenderbufferStorageMultisampleCHROMIUM(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void RenderbufferStorageMultisampleEXT(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void FramebufferTexture2DMultisampleEXT(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) OVERRIDE;
+virtual void TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) OVERRIDE;
+virtual void GenQueriesEXT(GLsizei n, GLuint* queries) OVERRIDE;
+virtual void DeleteQueriesEXT(GLsizei n, const GLuint* queries) OVERRIDE;
+virtual GLboolean IsQueryEXT(GLuint id) OVERRIDE;
+virtual void BeginQueryEXT(GLenum target, GLuint id) OVERRIDE;
+virtual void EndQueryEXT(GLenum target) OVERRIDE;
+virtual void GetQueryivEXT(GLenum target, GLenum pname, GLint* params) OVERRIDE;
+virtual void GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) OVERRIDE;
+virtual void InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) OVERRIDE;
+virtual void PushGroupMarkerEXT(GLsizei length, const GLchar* marker) OVERRIDE;
+virtual void PopGroupMarkerEXT() OVERRIDE;
+virtual void GenVertexArraysOES(GLsizei n, GLuint* arrays) OVERRIDE;
+virtual void DeleteVertexArraysOES(GLsizei n, const GLuint* arrays) OVERRIDE;
+virtual GLboolean IsVertexArrayOES(GLuint array) OVERRIDE;
+virtual void BindVertexArrayOES(GLuint array) OVERRIDE;
+virtual void SwapBuffers() OVERRIDE;
+virtual GLuint GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) OVERRIDE;
+virtual GLboolean EnableFeatureCHROMIUM(const char* feature) OVERRIDE;
+virtual void* MapBufferCHROMIUM(GLuint target, GLenum access) OVERRIDE;
+virtual GLboolean UnmapBufferCHROMIUM(GLuint target) OVERRIDE;
+virtual void* MapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void UnmapImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void* MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) OVERRIDE;
+virtual void UnmapBufferSubDataCHROMIUM(const void* mem) OVERRIDE;
+virtual void* MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) OVERRIDE;
+virtual void UnmapTexSubImage2DCHROMIUM(const void* mem) OVERRIDE;
+virtual void ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) OVERRIDE;
+virtual const GLchar* GetRequestableExtensionsCHROMIUM() OVERRIDE;
+virtual void RequestExtensionCHROMIUM(const char* extension) OVERRIDE;
+virtual void RateLimitOffscreenContextCHROMIUM() OVERRIDE;
+virtual void GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) OVERRIDE;
+virtual void GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) OVERRIDE;
+virtual GLuint CreateStreamTextureCHROMIUM(GLuint texture) OVERRIDE;
+virtual GLuint CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void DestroyImageCHROMIUM(GLuint image_id) OVERRIDE;
+virtual void GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+virtual GLuint CreateGpuMemoryBufferImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) OVERRIDE;
+virtual void GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) OVERRIDE;
+virtual void PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) OVERRIDE;
+virtual void TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) OVERRIDE;
+virtual void CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) OVERRIDE;
+virtual void DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) OVERRIDE;
+virtual void DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) OVERRIDE;
+virtual void VertexAttribDivisorANGLE(GLuint index, GLuint divisor) OVERRIDE;
+virtual void GenMailboxCHROMIUM(GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ProduceTextureDirectCHROMIUM(GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual GLuint CreateAndConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) OVERRIDE;
+virtual void BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) OVERRIDE;
+virtual void BindTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void ReleaseTexImage2DCHROMIUM(GLenum target, GLint imageId) OVERRIDE;
+virtual void TraceBeginCHROMIUM(const char* name) OVERRIDE;
+virtual void TraceEndCHROMIUM() OVERRIDE;
+virtual void AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) OVERRIDE;
+virtual void AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) OVERRIDE;
+virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+virtual void DiscardFramebufferEXT(GLenum target,
+ GLsizei count,
+ const GLenum* attachments) OVERRIDE;
+virtual void LoseContextCHROMIUM(GLenum current, GLenum other) OVERRIDE;
+virtual GLuint InsertSyncPointCHROMIUM() OVERRIDE;
+virtual void WaitSyncPointCHROMIUM(GLuint sync_point) OVERRIDE;
+virtual void DrawBuffersEXT(GLsizei count, const GLenum* bufs) OVERRIDE;
+virtual void DiscardBackbufferCHROMIUM() OVERRIDE;
+virtual void ScheduleOverlayPlaneCHROMIUM(GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) OVERRIDE;
+virtual void MatrixLoadfCHROMIUM(GLenum matrixMode, const GLfloat* m) OVERRIDE;
+virtual void MatrixLoadIdentityCHROMIUM(GLenum matrixMode) OVERRIDE;
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
new file mode 100644
index 0000000..4495967
--- /dev/null
+++ b/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -0,0 +1,1530 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_gles2_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_trace_implementation.cc
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
+
+void GLES2TraceImplementation::ActiveTexture(GLenum texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ActiveTexture");
+ gl_->ActiveTexture(texture);
+}
+
+void GLES2TraceImplementation::AttachShader(GLuint program, GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::AttachShader");
+ gl_->AttachShader(program, shader);
+}
+
+void GLES2TraceImplementation::BindAttribLocation(GLuint program,
+ GLuint index,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindAttribLocation");
+ gl_->BindAttribLocation(program, index, name);
+}
+
+void GLES2TraceImplementation::BindBuffer(GLenum target, GLuint buffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindBuffer");
+ gl_->BindBuffer(target, buffer);
+}
+
+void GLES2TraceImplementation::BindFramebuffer(GLenum target,
+ GLuint framebuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindFramebuffer");
+ gl_->BindFramebuffer(target, framebuffer);
+}
+
+void GLES2TraceImplementation::BindRenderbuffer(GLenum target,
+ GLuint renderbuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindRenderbuffer");
+ gl_->BindRenderbuffer(target, renderbuffer);
+}
+
+void GLES2TraceImplementation::BindTexture(GLenum target, GLuint texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindTexture");
+ gl_->BindTexture(target, texture);
+}
+
+void GLES2TraceImplementation::BlendColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendColor");
+ gl_->BlendColor(red, green, blue, alpha);
+}
+
+void GLES2TraceImplementation::BlendEquation(GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquation");
+ gl_->BlendEquation(mode);
+}
+
+void GLES2TraceImplementation::BlendEquationSeparate(GLenum modeRGB,
+ GLenum modeAlpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquationSeparate");
+ gl_->BlendEquationSeparate(modeRGB, modeAlpha);
+}
+
+void GLES2TraceImplementation::BlendFunc(GLenum sfactor, GLenum dfactor) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFunc");
+ gl_->BlendFunc(sfactor, dfactor);
+}
+
+void GLES2TraceImplementation::BlendFuncSeparate(GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFuncSeparate");
+ gl_->BlendFuncSeparate(srcRGB, dstRGB, srcAlpha, dstAlpha);
+}
+
+void GLES2TraceImplementation::BufferData(GLenum target,
+ GLsizeiptr size,
+ const void* data,
+ GLenum usage) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BufferData");
+ gl_->BufferData(target, size, data, usage);
+}
+
+void GLES2TraceImplementation::BufferSubData(GLenum target,
+ GLintptr offset,
+ GLsizeiptr size,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BufferSubData");
+ gl_->BufferSubData(target, offset, size, data);
+}
+
+GLenum GLES2TraceImplementation::CheckFramebufferStatus(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CheckFramebufferStatus");
+ return gl_->CheckFramebufferStatus(target);
+}
+
+void GLES2TraceImplementation::Clear(GLbitfield mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Clear");
+ gl_->Clear(mask);
+}
+
+void GLES2TraceImplementation::ClearColor(GLclampf red,
+ GLclampf green,
+ GLclampf blue,
+ GLclampf alpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ClearColor");
+ gl_->ClearColor(red, green, blue, alpha);
+}
+
+void GLES2TraceImplementation::ClearDepthf(GLclampf depth) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ClearDepthf");
+ gl_->ClearDepthf(depth);
+}
+
+void GLES2TraceImplementation::ClearStencil(GLint s) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ClearStencil");
+ gl_->ClearStencil(s);
+}
+
+void GLES2TraceImplementation::ColorMask(GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ColorMask");
+ gl_->ColorMask(red, green, blue, alpha);
+}
+
+void GLES2TraceImplementation::CompileShader(GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CompileShader");
+ gl_->CompileShader(shader);
+}
+
+void GLES2TraceImplementation::CompressedTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLsizei imageSize,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CompressedTexImage2D");
+ gl_->CompressedTexImage2D(
+ target, level, internalformat, width, height, border, imageSize, data);
+}
+
+void GLES2TraceImplementation::CompressedTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLsizei imageSize,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CompressedTexSubImage2D");
+ gl_->CompressedTexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, imageSize, data);
+}
+
+void GLES2TraceImplementation::CopyTexImage2D(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLint border) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTexImage2D");
+ gl_->CopyTexImage2D(
+ target, level, internalformat, x, y, width, height, border);
+}
+
+void GLES2TraceImplementation::CopyTexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTexSubImage2D");
+ gl_->CopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height);
+}
+
+GLuint GLES2TraceImplementation::CreateProgram() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CreateProgram");
+ return gl_->CreateProgram();
+}
+
+GLuint GLES2TraceImplementation::CreateShader(GLenum type) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CreateShader");
+ return gl_->CreateShader(type);
+}
+
+void GLES2TraceImplementation::CullFace(GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CullFace");
+ gl_->CullFace(mode);
+}
+
+void GLES2TraceImplementation::DeleteBuffers(GLsizei n, const GLuint* buffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteBuffers");
+ gl_->DeleteBuffers(n, buffers);
+}
+
+void GLES2TraceImplementation::DeleteFramebuffers(GLsizei n,
+ const GLuint* framebuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteFramebuffers");
+ gl_->DeleteFramebuffers(n, framebuffers);
+}
+
+void GLES2TraceImplementation::DeleteProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteProgram");
+ gl_->DeleteProgram(program);
+}
+
+void GLES2TraceImplementation::DeleteRenderbuffers(
+ GLsizei n,
+ const GLuint* renderbuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteRenderbuffers");
+ gl_->DeleteRenderbuffers(n, renderbuffers);
+}
+
+void GLES2TraceImplementation::DeleteShader(GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteShader");
+ gl_->DeleteShader(shader);
+}
+
+void GLES2TraceImplementation::DeleteTextures(GLsizei n,
+ const GLuint* textures) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteTextures");
+ gl_->DeleteTextures(n, textures);
+}
+
+void GLES2TraceImplementation::DepthFunc(GLenum func) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DepthFunc");
+ gl_->DepthFunc(func);
+}
+
+void GLES2TraceImplementation::DepthMask(GLboolean flag) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DepthMask");
+ gl_->DepthMask(flag);
+}
+
+void GLES2TraceImplementation::DepthRangef(GLclampf zNear, GLclampf zFar) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DepthRangef");
+ gl_->DepthRangef(zNear, zFar);
+}
+
+void GLES2TraceImplementation::DetachShader(GLuint program, GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DetachShader");
+ gl_->DetachShader(program, shader);
+}
+
+void GLES2TraceImplementation::Disable(GLenum cap) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Disable");
+ gl_->Disable(cap);
+}
+
+void GLES2TraceImplementation::DisableVertexAttribArray(GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DisableVertexAttribArray");
+ gl_->DisableVertexAttribArray(index);
+}
+
+void GLES2TraceImplementation::DrawArrays(GLenum mode,
+ GLint first,
+ GLsizei count) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawArrays");
+ gl_->DrawArrays(mode, first, count);
+}
+
+void GLES2TraceImplementation::DrawElements(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawElements");
+ gl_->DrawElements(mode, count, type, indices);
+}
+
+void GLES2TraceImplementation::Enable(GLenum cap) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Enable");
+ gl_->Enable(cap);
+}
+
+void GLES2TraceImplementation::EnableVertexAttribArray(GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EnableVertexAttribArray");
+ gl_->EnableVertexAttribArray(index);
+}
+
+void GLES2TraceImplementation::Finish() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Finish");
+ gl_->Finish();
+}
+
+void GLES2TraceImplementation::Flush() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Flush");
+ gl_->Flush();
+}
+
+void GLES2TraceImplementation::FramebufferRenderbuffer(
+ GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::FramebufferRenderbuffer");
+ gl_->FramebufferRenderbuffer(
+ target, attachment, renderbuffertarget, renderbuffer);
+}
+
+void GLES2TraceImplementation::FramebufferTexture2D(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::FramebufferTexture2D");
+ gl_->FramebufferTexture2D(target, attachment, textarget, texture, level);
+}
+
+void GLES2TraceImplementation::FrontFace(GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::FrontFace");
+ gl_->FrontFace(mode);
+}
+
+void GLES2TraceImplementation::GenBuffers(GLsizei n, GLuint* buffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenBuffers");
+ gl_->GenBuffers(n, buffers);
+}
+
+void GLES2TraceImplementation::GenerateMipmap(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenerateMipmap");
+ gl_->GenerateMipmap(target);
+}
+
+void GLES2TraceImplementation::GenFramebuffers(GLsizei n,
+ GLuint* framebuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenFramebuffers");
+ gl_->GenFramebuffers(n, framebuffers);
+}
+
+void GLES2TraceImplementation::GenRenderbuffers(GLsizei n,
+ GLuint* renderbuffers) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenRenderbuffers");
+ gl_->GenRenderbuffers(n, renderbuffers);
+}
+
+void GLES2TraceImplementation::GenTextures(GLsizei n, GLuint* textures) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenTextures");
+ gl_->GenTextures(n, textures);
+}
+
+void GLES2TraceImplementation::GetActiveAttrib(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetActiveAttrib");
+ gl_->GetActiveAttrib(program, index, bufsize, length, size, type, name);
+}
+
+void GLES2TraceImplementation::GetActiveUniform(GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetActiveUniform");
+ gl_->GetActiveUniform(program, index, bufsize, length, size, type, name);
+}
+
+void GLES2TraceImplementation::GetAttachedShaders(GLuint program,
+ GLsizei maxcount,
+ GLsizei* count,
+ GLuint* shaders) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetAttachedShaders");
+ gl_->GetAttachedShaders(program, maxcount, count, shaders);
+}
+
+GLint GLES2TraceImplementation::GetAttribLocation(GLuint program,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetAttribLocation");
+ return gl_->GetAttribLocation(program, name);
+}
+
+void GLES2TraceImplementation::GetBooleanv(GLenum pname, GLboolean* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetBooleanv");
+ gl_->GetBooleanv(pname, params);
+}
+
+void GLES2TraceImplementation::GetBufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetBufferParameteriv");
+ gl_->GetBufferParameteriv(target, pname, params);
+}
+
+GLenum GLES2TraceImplementation::GetError() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetError");
+ return gl_->GetError();
+}
+
+void GLES2TraceImplementation::GetFloatv(GLenum pname, GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetFloatv");
+ gl_->GetFloatv(pname, params);
+}
+
+void GLES2TraceImplementation::GetFramebufferAttachmentParameteriv(
+ GLenum target,
+ GLenum attachment,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::GetFramebufferAttachmentParameteriv");
+ gl_->GetFramebufferAttachmentParameteriv(target, attachment, pname, params);
+}
+
+void GLES2TraceImplementation::GetIntegerv(GLenum pname, GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetIntegerv");
+ gl_->GetIntegerv(pname, params);
+}
+
+void GLES2TraceImplementation::GetProgramiv(GLuint program,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramiv");
+ gl_->GetProgramiv(program, pname, params);
+}
+
+void GLES2TraceImplementation::GetProgramInfoLog(GLuint program,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramInfoLog");
+ gl_->GetProgramInfoLog(program, bufsize, length, infolog);
+}
+
+void GLES2TraceImplementation::GetRenderbufferParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetRenderbufferParameteriv");
+ gl_->GetRenderbufferParameteriv(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetShaderiv(GLuint shader,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderiv");
+ gl_->GetShaderiv(shader, pname, params);
+}
+
+void GLES2TraceImplementation::GetShaderInfoLog(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* infolog) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderInfoLog");
+ gl_->GetShaderInfoLog(shader, bufsize, length, infolog);
+}
+
+void GLES2TraceImplementation::GetShaderPrecisionFormat(GLenum shadertype,
+ GLenum precisiontype,
+ GLint* range,
+ GLint* precision) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderPrecisionFormat");
+ gl_->GetShaderPrecisionFormat(shadertype, precisiontype, range, precision);
+}
+
+void GLES2TraceImplementation::GetShaderSource(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetShaderSource");
+ gl_->GetShaderSource(shader, bufsize, length, source);
+}
+
+const GLubyte* GLES2TraceImplementation::GetString(GLenum name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetString");
+ return gl_->GetString(name);
+}
+
+void GLES2TraceImplementation::GetTexParameterfv(GLenum target,
+ GLenum pname,
+ GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetTexParameterfv");
+ gl_->GetTexParameterfv(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetTexParameteriv(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetTexParameteriv");
+ gl_->GetTexParameteriv(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetUniformfv(GLuint program,
+ GLint location,
+ GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetUniformfv");
+ gl_->GetUniformfv(program, location, params);
+}
+
+void GLES2TraceImplementation::GetUniformiv(GLuint program,
+ GLint location,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetUniformiv");
+ gl_->GetUniformiv(program, location, params);
+}
+
+GLint GLES2TraceImplementation::GetUniformLocation(GLuint program,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetUniformLocation");
+ return gl_->GetUniformLocation(program, name);
+}
+
+void GLES2TraceImplementation::GetVertexAttribfv(GLuint index,
+ GLenum pname,
+ GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetVertexAttribfv");
+ gl_->GetVertexAttribfv(index, pname, params);
+}
+
+void GLES2TraceImplementation::GetVertexAttribiv(GLuint index,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetVertexAttribiv");
+ gl_->GetVertexAttribiv(index, pname, params);
+}
+
+void GLES2TraceImplementation::GetVertexAttribPointerv(GLuint index,
+ GLenum pname,
+ void** pointer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetVertexAttribPointerv");
+ gl_->GetVertexAttribPointerv(index, pname, pointer);
+}
+
+void GLES2TraceImplementation::Hint(GLenum target, GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Hint");
+ gl_->Hint(target, mode);
+}
+
+GLboolean GLES2TraceImplementation::IsBuffer(GLuint buffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsBuffer");
+ return gl_->IsBuffer(buffer);
+}
+
+GLboolean GLES2TraceImplementation::IsEnabled(GLenum cap) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsEnabled");
+ return gl_->IsEnabled(cap);
+}
+
+GLboolean GLES2TraceImplementation::IsFramebuffer(GLuint framebuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsFramebuffer");
+ return gl_->IsFramebuffer(framebuffer);
+}
+
+GLboolean GLES2TraceImplementation::IsProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsProgram");
+ return gl_->IsProgram(program);
+}
+
+GLboolean GLES2TraceImplementation::IsRenderbuffer(GLuint renderbuffer) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsRenderbuffer");
+ return gl_->IsRenderbuffer(renderbuffer);
+}
+
+GLboolean GLES2TraceImplementation::IsShader(GLuint shader) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsShader");
+ return gl_->IsShader(shader);
+}
+
+GLboolean GLES2TraceImplementation::IsTexture(GLuint texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsTexture");
+ return gl_->IsTexture(texture);
+}
+
+void GLES2TraceImplementation::LineWidth(GLfloat width) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::LineWidth");
+ gl_->LineWidth(width);
+}
+
+void GLES2TraceImplementation::LinkProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::LinkProgram");
+ gl_->LinkProgram(program);
+}
+
+void GLES2TraceImplementation::PixelStorei(GLenum pname, GLint param) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PixelStorei");
+ gl_->PixelStorei(pname, param);
+}
+
+void GLES2TraceImplementation::PolygonOffset(GLfloat factor, GLfloat units) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PolygonOffset");
+ gl_->PolygonOffset(factor, units);
+}
+
+void GLES2TraceImplementation::ReadPixels(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ReadPixels");
+ gl_->ReadPixels(x, y, width, height, format, type, pixels);
+}
+
+void GLES2TraceImplementation::ReleaseShaderCompiler() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ReleaseShaderCompiler");
+ gl_->ReleaseShaderCompiler();
+}
+
+void GLES2TraceImplementation::RenderbufferStorage(GLenum target,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::RenderbufferStorage");
+ gl_->RenderbufferStorage(target, internalformat, width, height);
+}
+
+void GLES2TraceImplementation::SampleCoverage(GLclampf value,
+ GLboolean invert) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::SampleCoverage");
+ gl_->SampleCoverage(value, invert);
+}
+
+void GLES2TraceImplementation::Scissor(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Scissor");
+ gl_->Scissor(x, y, width, height);
+}
+
+void GLES2TraceImplementation::ShaderBinary(GLsizei n,
+ const GLuint* shaders,
+ GLenum binaryformat,
+ const void* binary,
+ GLsizei length) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShaderBinary");
+ gl_->ShaderBinary(n, shaders, binaryformat, binary, length);
+}
+
+void GLES2TraceImplementation::ShaderSource(GLuint shader,
+ GLsizei count,
+ const GLchar* const* str,
+ const GLint* length) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShaderSource");
+ gl_->ShaderSource(shader, count, str, length);
+}
+
+void GLES2TraceImplementation::ShallowFinishCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShallowFinishCHROMIUM");
+ gl_->ShallowFinishCHROMIUM();
+}
+
+void GLES2TraceImplementation::ShallowFlushCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ShallowFlushCHROMIUM");
+ gl_->ShallowFlushCHROMIUM();
+}
+
+void GLES2TraceImplementation::StencilFunc(GLenum func,
+ GLint ref,
+ GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilFunc");
+ gl_->StencilFunc(func, ref, mask);
+}
+
+void GLES2TraceImplementation::StencilFuncSeparate(GLenum face,
+ GLenum func,
+ GLint ref,
+ GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilFuncSeparate");
+ gl_->StencilFuncSeparate(face, func, ref, mask);
+}
+
+void GLES2TraceImplementation::StencilMask(GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilMask");
+ gl_->StencilMask(mask);
+}
+
+void GLES2TraceImplementation::StencilMaskSeparate(GLenum face, GLuint mask) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilMaskSeparate");
+ gl_->StencilMaskSeparate(face, mask);
+}
+
+void GLES2TraceImplementation::StencilOp(GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilOp");
+ gl_->StencilOp(fail, zfail, zpass);
+}
+
+void GLES2TraceImplementation::StencilOpSeparate(GLenum face,
+ GLenum fail,
+ GLenum zfail,
+ GLenum zpass) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::StencilOpSeparate");
+ gl_->StencilOpSeparate(face, fail, zfail, zpass);
+}
+
+void GLES2TraceImplementation::TexImage2D(GLenum target,
+ GLint level,
+ GLint internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexImage2D");
+ gl_->TexImage2D(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+
+void GLES2TraceImplementation::TexParameterf(GLenum target,
+ GLenum pname,
+ GLfloat param) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameterf");
+ gl_->TexParameterf(target, pname, param);
+}
+
+void GLES2TraceImplementation::TexParameterfv(GLenum target,
+ GLenum pname,
+ const GLfloat* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameterfv");
+ gl_->TexParameterfv(target, pname, params);
+}
+
+void GLES2TraceImplementation::TexParameteri(GLenum target,
+ GLenum pname,
+ GLint param) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameteri");
+ gl_->TexParameteri(target, pname, param);
+}
+
+void GLES2TraceImplementation::TexParameteriv(GLenum target,
+ GLenum pname,
+ const GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexParameteriv");
+ gl_->TexParameteriv(target, pname, params);
+}
+
+void GLES2TraceImplementation::TexSubImage2D(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexSubImage2D");
+ gl_->TexSubImage2D(
+ target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+
+void GLES2TraceImplementation::Uniform1f(GLint location, GLfloat x) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1f");
+ gl_->Uniform1f(location, x);
+}
+
+void GLES2TraceImplementation::Uniform1fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1fv");
+ gl_->Uniform1fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform1i(GLint location, GLint x) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1i");
+ gl_->Uniform1i(location, x);
+}
+
+void GLES2TraceImplementation::Uniform1iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform1iv");
+ gl_->Uniform1iv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform2f(GLint location, GLfloat x, GLfloat y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2f");
+ gl_->Uniform2f(location, x, y);
+}
+
+void GLES2TraceImplementation::Uniform2fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2fv");
+ gl_->Uniform2fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform2i(GLint location, GLint x, GLint y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2i");
+ gl_->Uniform2i(location, x, y);
+}
+
+void GLES2TraceImplementation::Uniform2iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform2iv");
+ gl_->Uniform2iv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform3f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3f");
+ gl_->Uniform3f(location, x, y, z);
+}
+
+void GLES2TraceImplementation::Uniform3fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3fv");
+ gl_->Uniform3fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform3i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3i");
+ gl_->Uniform3i(location, x, y, z);
+}
+
+void GLES2TraceImplementation::Uniform3iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform3iv");
+ gl_->Uniform3iv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform4f(GLint location,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4f");
+ gl_->Uniform4f(location, x, y, z, w);
+}
+
+void GLES2TraceImplementation::Uniform4fv(GLint location,
+ GLsizei count,
+ const GLfloat* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4fv");
+ gl_->Uniform4fv(location, count, v);
+}
+
+void GLES2TraceImplementation::Uniform4i(GLint location,
+ GLint x,
+ GLint y,
+ GLint z,
+ GLint w) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4i");
+ gl_->Uniform4i(location, x, y, z, w);
+}
+
+void GLES2TraceImplementation::Uniform4iv(GLint location,
+ GLsizei count,
+ const GLint* v) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Uniform4iv");
+ gl_->Uniform4iv(location, count, v);
+}
+
+void GLES2TraceImplementation::UniformMatrix2fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UniformMatrix2fv");
+ gl_->UniformMatrix2fv(location, count, transpose, value);
+}
+
+void GLES2TraceImplementation::UniformMatrix3fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UniformMatrix3fv");
+ gl_->UniformMatrix3fv(location, count, transpose, value);
+}
+
+void GLES2TraceImplementation::UniformMatrix4fv(GLint location,
+ GLsizei count,
+ GLboolean transpose,
+ const GLfloat* value) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UniformMatrix4fv");
+ gl_->UniformMatrix4fv(location, count, transpose, value);
+}
+
+void GLES2TraceImplementation::UseProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UseProgram");
+ gl_->UseProgram(program);
+}
+
+void GLES2TraceImplementation::ValidateProgram(GLuint program) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ValidateProgram");
+ gl_->ValidateProgram(program);
+}
+
+void GLES2TraceImplementation::VertexAttrib1f(GLuint indx, GLfloat x) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib1f");
+ gl_->VertexAttrib1f(indx, x);
+}
+
+void GLES2TraceImplementation::VertexAttrib1fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib1fv");
+ gl_->VertexAttrib1fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttrib2f(GLuint indx,
+ GLfloat x,
+ GLfloat y) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib2f");
+ gl_->VertexAttrib2f(indx, x, y);
+}
+
+void GLES2TraceImplementation::VertexAttrib2fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib2fv");
+ gl_->VertexAttrib2fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttrib3f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib3f");
+ gl_->VertexAttrib3f(indx, x, y, z);
+}
+
+void GLES2TraceImplementation::VertexAttrib3fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib3fv");
+ gl_->VertexAttrib3fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttrib4f(GLuint indx,
+ GLfloat x,
+ GLfloat y,
+ GLfloat z,
+ GLfloat w) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib4f");
+ gl_->VertexAttrib4f(indx, x, y, z, w);
+}
+
+void GLES2TraceImplementation::VertexAttrib4fv(GLuint indx,
+ const GLfloat* values) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttrib4fv");
+ gl_->VertexAttrib4fv(indx, values);
+}
+
+void GLES2TraceImplementation::VertexAttribPointer(GLuint indx,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttribPointer");
+ gl_->VertexAttribPointer(indx, size, type, normalized, stride, ptr);
+}
+
+void GLES2TraceImplementation::Viewport(GLint x,
+ GLint y,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::Viewport");
+ gl_->Viewport(x, y, width, height);
+}
+
+void GLES2TraceImplementation::BlitFramebufferCHROMIUM(GLint srcX0,
+ GLint srcY0,
+ GLint srcX1,
+ GLint srcY1,
+ GLint dstX0,
+ GLint dstY0,
+ GLint dstX1,
+ GLint dstY1,
+ GLbitfield mask,
+ GLenum filter) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlitFramebufferCHROMIUM");
+ gl_->BlitFramebufferCHROMIUM(
+ srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+}
+
+void GLES2TraceImplementation::RenderbufferStorageMultisampleCHROMIUM(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RenderbufferStorageMultisampleCHROMIUM");
+ gl_->RenderbufferStorageMultisampleCHROMIUM(
+ target, samples, internalformat, width, height);
+}
+
+void GLES2TraceImplementation::RenderbufferStorageMultisampleEXT(
+ GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RenderbufferStorageMultisampleEXT");
+ gl_->RenderbufferStorageMultisampleEXT(
+ target, samples, internalformat, width, height);
+}
+
+void GLES2TraceImplementation::FramebufferTexture2DMultisampleEXT(
+ GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::FramebufferTexture2DMultisampleEXT");
+ gl_->FramebufferTexture2DMultisampleEXT(
+ target, attachment, textarget, texture, level, samples);
+}
+
+void GLES2TraceImplementation::TexStorage2DEXT(GLenum target,
+ GLsizei levels,
+ GLenum internalFormat,
+ GLsizei width,
+ GLsizei height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TexStorage2DEXT");
+ gl_->TexStorage2DEXT(target, levels, internalFormat, width, height);
+}
+
+void GLES2TraceImplementation::GenQueriesEXT(GLsizei n, GLuint* queries) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenQueriesEXT");
+ gl_->GenQueriesEXT(n, queries);
+}
+
+void GLES2TraceImplementation::DeleteQueriesEXT(GLsizei n,
+ const GLuint* queries) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteQueriesEXT");
+ gl_->DeleteQueriesEXT(n, queries);
+}
+
+GLboolean GLES2TraceImplementation::IsQueryEXT(GLuint id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsQueryEXT");
+ return gl_->IsQueryEXT(id);
+}
+
+void GLES2TraceImplementation::BeginQueryEXT(GLenum target, GLuint id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BeginQueryEXT");
+ gl_->BeginQueryEXT(target, id);
+}
+
+void GLES2TraceImplementation::EndQueryEXT(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EndQueryEXT");
+ gl_->EndQueryEXT(target);
+}
+
+void GLES2TraceImplementation::GetQueryivEXT(GLenum target,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetQueryivEXT");
+ gl_->GetQueryivEXT(target, pname, params);
+}
+
+void GLES2TraceImplementation::GetQueryObjectuivEXT(GLuint id,
+ GLenum pname,
+ GLuint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetQueryObjectuivEXT");
+ gl_->GetQueryObjectuivEXT(id, pname, params);
+}
+
+void GLES2TraceImplementation::InsertEventMarkerEXT(GLsizei length,
+ const GLchar* marker) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::InsertEventMarkerEXT");
+ gl_->InsertEventMarkerEXT(length, marker);
+}
+
+void GLES2TraceImplementation::PushGroupMarkerEXT(GLsizei length,
+ const GLchar* marker) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PushGroupMarkerEXT");
+ gl_->PushGroupMarkerEXT(length, marker);
+}
+
+void GLES2TraceImplementation::PopGroupMarkerEXT() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PopGroupMarkerEXT");
+ gl_->PopGroupMarkerEXT();
+}
+
+void GLES2TraceImplementation::GenVertexArraysOES(GLsizei n, GLuint* arrays) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenVertexArraysOES");
+ gl_->GenVertexArraysOES(n, arrays);
+}
+
+void GLES2TraceImplementation::DeleteVertexArraysOES(GLsizei n,
+ const GLuint* arrays) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DeleteVertexArraysOES");
+ gl_->DeleteVertexArraysOES(n, arrays);
+}
+
+GLboolean GLES2TraceImplementation::IsVertexArrayOES(GLuint array) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsVertexArrayOES");
+ return gl_->IsVertexArrayOES(array);
+}
+
+void GLES2TraceImplementation::BindVertexArrayOES(GLuint array) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindVertexArrayOES");
+ gl_->BindVertexArrayOES(array);
+}
+
+void GLES2TraceImplementation::SwapBuffers() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::SwapBuffers");
+ gl_->SwapBuffers();
+}
+
+GLuint GLES2TraceImplementation::GetMaxValueInBufferCHROMIUM(GLuint buffer_id,
+ GLsizei count,
+ GLenum type,
+ GLuint offset) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetMaxValueInBufferCHROMIUM");
+ return gl_->GetMaxValueInBufferCHROMIUM(buffer_id, count, type, offset);
+}
+
+GLboolean GLES2TraceImplementation::EnableFeatureCHROMIUM(const char* feature) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EnableFeatureCHROMIUM");
+ return gl_->EnableFeatureCHROMIUM(feature);
+}
+
+void* GLES2TraceImplementation::MapBufferCHROMIUM(GLuint target,
+ GLenum access) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapBufferCHROMIUM");
+ return gl_->MapBufferCHROMIUM(target, access);
+}
+
+GLboolean GLES2TraceImplementation::UnmapBufferCHROMIUM(GLuint target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UnmapBufferCHROMIUM");
+ return gl_->UnmapBufferCHROMIUM(target);
+}
+
+void* GLES2TraceImplementation::MapImageCHROMIUM(GLuint image_id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapImageCHROMIUM");
+ return gl_->MapImageCHROMIUM(image_id);
+}
+
+void GLES2TraceImplementation::UnmapImageCHROMIUM(GLuint image_id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::UnmapImageCHROMIUM");
+ gl_->UnmapImageCHROMIUM(image_id);
+}
+
+void* GLES2TraceImplementation::MapBufferSubDataCHROMIUM(GLuint target,
+ GLintptr offset,
+ GLsizeiptr size,
+ GLenum access) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapBufferSubDataCHROMIUM");
+ return gl_->MapBufferSubDataCHROMIUM(target, offset, size, access);
+}
+
+void GLES2TraceImplementation::UnmapBufferSubDataCHROMIUM(const void* mem) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::UnmapBufferSubDataCHROMIUM");
+ gl_->UnmapBufferSubDataCHROMIUM(mem);
+}
+
+void* GLES2TraceImplementation::MapTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ GLenum access) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MapTexSubImage2DCHROMIUM");
+ return gl_->MapTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, access);
+}
+
+void GLES2TraceImplementation::UnmapTexSubImage2DCHROMIUM(const void* mem) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::UnmapTexSubImage2DCHROMIUM");
+ gl_->UnmapTexSubImage2DCHROMIUM(mem);
+}
+
+void GLES2TraceImplementation::ResizeCHROMIUM(GLuint width,
+ GLuint height,
+ GLfloat scale_factor) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ResizeCHROMIUM");
+ gl_->ResizeCHROMIUM(width, height, scale_factor);
+}
+
+const GLchar* GLES2TraceImplementation::GetRequestableExtensionsCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetRequestableExtensionsCHROMIUM");
+ return gl_->GetRequestableExtensionsCHROMIUM();
+}
+
+void GLES2TraceImplementation::RequestExtensionCHROMIUM(const char* extension) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::RequestExtensionCHROMIUM");
+ gl_->RequestExtensionCHROMIUM(extension);
+}
+
+void GLES2TraceImplementation::RateLimitOffscreenContextCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::RateLimitOffscreenContextCHROMIUM");
+ gl_->RateLimitOffscreenContextCHROMIUM();
+}
+
+void GLES2TraceImplementation::GetMultipleIntegervCHROMIUM(const GLenum* pnames,
+ GLuint count,
+ GLint* results,
+ GLsizeiptr size) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetMultipleIntegervCHROMIUM");
+ gl_->GetMultipleIntegervCHROMIUM(pnames, count, results, size);
+}
+
+void GLES2TraceImplementation::GetProgramInfoCHROMIUM(GLuint program,
+ GLsizei bufsize,
+ GLsizei* size,
+ void* info) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetProgramInfoCHROMIUM");
+ gl_->GetProgramInfoCHROMIUM(program, bufsize, size, info);
+}
+
+GLuint GLES2TraceImplementation::CreateStreamTextureCHROMIUM(GLuint texture) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::CreateStreamTextureCHROMIUM");
+ return gl_->CreateStreamTextureCHROMIUM(texture);
+}
+
+GLuint GLES2TraceImplementation::CreateImageCHROMIUM(GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CreateImageCHROMIUM");
+ return gl_->CreateImageCHROMIUM(width, height, internalformat, usage);
+}
+
+void GLES2TraceImplementation::DestroyImageCHROMIUM(GLuint image_id) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DestroyImageCHROMIUM");
+ gl_->DestroyImageCHROMIUM(image_id);
+}
+
+void GLES2TraceImplementation::GetImageParameterivCHROMIUM(GLuint image_id,
+ GLenum pname,
+ GLint* params) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetImageParameterivCHROMIUM");
+ gl_->GetImageParameterivCHROMIUM(image_id, pname, params);
+}
+
+GLuint GLES2TraceImplementation::CreateGpuMemoryBufferImageCHROMIUM(
+ GLsizei width,
+ GLsizei height,
+ GLenum internalformat,
+ GLenum usage) {
+ TRACE_EVENT_BINARY_EFFICIENT0(
+ "gpu", "GLES2Trace::CreateGpuMemoryBufferImageCHROMIUM");
+ return gl_->CreateGpuMemoryBufferImageCHROMIUM(
+ width, height, internalformat, usage);
+}
+
+void GLES2TraceImplementation::GetTranslatedShaderSourceANGLE(GLuint shader,
+ GLsizei bufsize,
+ GLsizei* length,
+ char* source) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::GetTranslatedShaderSourceANGLE");
+ gl_->GetTranslatedShaderSourceANGLE(shader, bufsize, length, source);
+}
+
+void GLES2TraceImplementation::PostSubBufferCHROMIUM(GLint x,
+ GLint y,
+ GLint width,
+ GLint height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::PostSubBufferCHROMIUM");
+ gl_->PostSubBufferCHROMIUM(x, y, width, height);
+}
+
+void GLES2TraceImplementation::TexImageIOSurface2DCHROMIUM(GLenum target,
+ GLsizei width,
+ GLsizei height,
+ GLuint ioSurfaceId,
+ GLuint plane) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::TexImageIOSurface2DCHROMIUM");
+ gl_->TexImageIOSurface2DCHROMIUM(target, width, height, ioSurfaceId, plane);
+}
+
+void GLES2TraceImplementation::CopyTextureCHROMIUM(GLenum target,
+ GLenum source_id,
+ GLenum dest_id,
+ GLint level,
+ GLint internalformat,
+ GLenum dest_type) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::CopyTextureCHROMIUM");
+ gl_->CopyTextureCHROMIUM(
+ target, source_id, dest_id, level, internalformat, dest_type);
+}
+
+void GLES2TraceImplementation::DrawArraysInstancedANGLE(GLenum mode,
+ GLint first,
+ GLsizei count,
+ GLsizei primcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawArraysInstancedANGLE");
+ gl_->DrawArraysInstancedANGLE(mode, first, count, primcount);
+}
+
+void GLES2TraceImplementation::DrawElementsInstancedANGLE(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const void* indices,
+ GLsizei primcount) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::DrawElementsInstancedANGLE");
+ gl_->DrawElementsInstancedANGLE(mode, count, type, indices, primcount);
+}
+
+void GLES2TraceImplementation::VertexAttribDivisorANGLE(GLuint index,
+ GLuint divisor) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::VertexAttribDivisorANGLE");
+ gl_->VertexAttribDivisorANGLE(index, divisor);
+}
+
+void GLES2TraceImplementation::GenMailboxCHROMIUM(GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GenMailboxCHROMIUM");
+ gl_->GenMailboxCHROMIUM(mailbox);
+}
+
+void GLES2TraceImplementation::ProduceTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ProduceTextureCHROMIUM");
+ gl_->ProduceTextureCHROMIUM(target, mailbox);
+}
+
+void GLES2TraceImplementation::ProduceTextureDirectCHROMIUM(
+ GLuint texture,
+ GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::ProduceTextureDirectCHROMIUM");
+ gl_->ProduceTextureDirectCHROMIUM(texture, target, mailbox);
+}
+
+void GLES2TraceImplementation::ConsumeTextureCHROMIUM(GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ConsumeTextureCHROMIUM");
+ gl_->ConsumeTextureCHROMIUM(target, mailbox);
+}
+
+GLuint GLES2TraceImplementation::CreateAndConsumeTextureCHROMIUM(
+ GLenum target,
+ const GLbyte* mailbox) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::CreateAndConsumeTextureCHROMIUM");
+ return gl_->CreateAndConsumeTextureCHROMIUM(target, mailbox);
+}
+
+void GLES2TraceImplementation::BindUniformLocationCHROMIUM(GLuint program,
+ GLint location,
+ const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::BindUniformLocationCHROMIUM");
+ gl_->BindUniformLocationCHROMIUM(program, location, name);
+}
+
+void GLES2TraceImplementation::BindTexImage2DCHROMIUM(GLenum target,
+ GLint imageId) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BindTexImage2DCHROMIUM");
+ gl_->BindTexImage2DCHROMIUM(target, imageId);
+}
+
+void GLES2TraceImplementation::ReleaseTexImage2DCHROMIUM(GLenum target,
+ GLint imageId) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ReleaseTexImage2DCHROMIUM");
+ gl_->ReleaseTexImage2DCHROMIUM(target, imageId);
+}
+
+void GLES2TraceImplementation::TraceBeginCHROMIUM(const char* name) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TraceBeginCHROMIUM");
+ gl_->TraceBeginCHROMIUM(name);
+}
+
+void GLES2TraceImplementation::TraceEndCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::TraceEndCHROMIUM");
+ gl_->TraceEndCHROMIUM();
+}
+
+void GLES2TraceImplementation::AsyncTexSubImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLint yoffset,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ const void* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::AsyncTexSubImage2DCHROMIUM");
+ gl_->AsyncTexSubImage2DCHROMIUM(
+ target, level, xoffset, yoffset, width, height, format, type, data);
+}
+
+void GLES2TraceImplementation::AsyncTexImage2DCHROMIUM(GLenum target,
+ GLint level,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const void* pixels) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::AsyncTexImage2DCHROMIUM");
+ gl_->AsyncTexImage2DCHROMIUM(target,
+ level,
+ internalformat,
+ width,
+ height,
+ border,
+ format,
+ type,
+ pixels);
+}
+
+void GLES2TraceImplementation::WaitAsyncTexImage2DCHROMIUM(GLenum target) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::WaitAsyncTexImage2DCHROMIUM");
+ gl_->WaitAsyncTexImage2DCHROMIUM(target);
+}
+
+void GLES2TraceImplementation::WaitAllAsyncTexImage2DCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::WaitAllAsyncTexImage2DCHROMIUM");
+ gl_->WaitAllAsyncTexImage2DCHROMIUM();
+}
+
+void GLES2TraceImplementation::DiscardFramebufferEXT(
+ GLenum target,
+ GLsizei count,
+ const GLenum* attachments) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DiscardFramebufferEXT");
+ gl_->DiscardFramebufferEXT(target, count, attachments);
+}
+
+void GLES2TraceImplementation::LoseContextCHROMIUM(GLenum current,
+ GLenum other) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::LoseContextCHROMIUM");
+ gl_->LoseContextCHROMIUM(current, other);
+}
+
+GLuint GLES2TraceImplementation::InsertSyncPointCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::InsertSyncPointCHROMIUM");
+ return gl_->InsertSyncPointCHROMIUM();
+}
+
+void GLES2TraceImplementation::WaitSyncPointCHROMIUM(GLuint sync_point) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::WaitSyncPointCHROMIUM");
+ gl_->WaitSyncPointCHROMIUM(sync_point);
+}
+
+void GLES2TraceImplementation::DrawBuffersEXT(GLsizei count,
+ const GLenum* bufs) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DrawBuffersEXT");
+ gl_->DrawBuffersEXT(count, bufs);
+}
+
+void GLES2TraceImplementation::DiscardBackbufferCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DiscardBackbufferCHROMIUM");
+ gl_->DiscardBackbufferCHROMIUM();
+}
+
+void GLES2TraceImplementation::ScheduleOverlayPlaneCHROMIUM(
+ GLint plane_z_order,
+ GLenum plane_transform,
+ GLuint overlay_texture_id,
+ GLint bounds_x,
+ GLint bounds_y,
+ GLint bounds_width,
+ GLint bounds_height,
+ GLfloat uv_x,
+ GLfloat uv_y,
+ GLfloat uv_width,
+ GLfloat uv_height) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::ScheduleOverlayPlaneCHROMIUM");
+ gl_->ScheduleOverlayPlaneCHROMIUM(plane_z_order,
+ plane_transform,
+ overlay_texture_id,
+ bounds_x,
+ bounds_y,
+ bounds_width,
+ bounds_height,
+ uv_x,
+ uv_y,
+ uv_width,
+ uv_height);
+}
+
+void GLES2TraceImplementation::MatrixLoadfCHROMIUM(GLenum matrixMode,
+ const GLfloat* m) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::MatrixLoadfCHROMIUM");
+ gl_->MatrixLoadfCHROMIUM(matrixMode, m);
+}
+
+void GLES2TraceImplementation::MatrixLoadIdentityCHROMIUM(GLenum matrixMode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::MatrixLoadIdentityCHROMIUM");
+ gl_->MatrixLoadIdentityCHROMIUM(matrixMode);
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gpu_control.h b/gpu/command_buffer/client/gpu_control.h
new file mode 100644
index 0000000..b28757c
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_control.h
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GPU_CONTROL_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GPU_CONTROL_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/gpu_export.h"
+
+namespace gfx {
+class GpuMemoryBuffer;
+}
+
+namespace gpu {
+
+// Common interface for GpuControl implementations.
+class GPU_EXPORT GpuControl {
+ public:
+ GpuControl() {}
+ virtual ~GpuControl() {}
+
+ virtual Capabilities GetCapabilities() = 0;
+
+ // Creates a gpu memory buffer of the given dimensions and format. Returns
+ // the buffer and stores its ID in |id|, or returns NULL on error.
+ virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(
+ size_t width,
+ size_t height,
+ unsigned internalformat,
+ unsigned usage,
+ int32_t* id) = 0;
+
+ // Destroy a gpu memory buffer. The ID must be positive.
+ virtual void DestroyGpuMemoryBuffer(int32_t id) = 0;
+
+ // Inserts a sync point, returning its ID. Sync point IDs are global and can
+ // be used for cross-context synchronization.
+ virtual uint32_t InsertSyncPoint() = 0;
+
+ // Inserts a future sync point, returning its ID. Sync point IDs are global
+ // and can be used for cross-context synchronization. The sync point won't be
+ // retired immediately.
+ virtual uint32_t InsertFutureSyncPoint() = 0;
+
+ // Retires a future sync point. This will signal contexts that are waiting
+ // on it to start executing.
+ virtual void RetireSyncPoint(uint32_t sync_point) = 0;
+
+ // Runs |callback| when a sync point is reached.
+ virtual void SignalSyncPoint(uint32_t sync_point,
+ const base::Closure& callback) = 0;
+
+ // Runs |callback| when a query created via glCreateQueryEXT() has passed
+ // the glEndQueryEXT() point.
+ virtual void SignalQuery(uint32_t query, const base::Closure& callback) = 0;
+
+ virtual void SetSurfaceVisible(bool visible) = 0;
+
+ // Attaches an external stream to the texture given by |texture_id| and
+ // returns a stream identifier.
+ virtual uint32_t CreateStreamTexture(uint32_t texture_id) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(GpuControl);
+};
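+
+// Illustrative sketch (not part of this patch) of the intended cross-context
+// synchronization flow using the sync point methods above. |producer| and
+// |consumer| stand in for GpuControl implementations of two contexts, and
+// OnProducerWorkVisible is a hypothetical callback:
+//
+//   uint32_t sync_point = producer->InsertSyncPoint();
+//   consumer->SignalSyncPoint(sync_point,
+//                             base::Bind(&OnProducerWorkVisible));
+//
+// The callback runs once the service has processed the producer's commands up
+// to the sync point.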
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GPU_CONTROL_H_
diff --git a/gpu/command_buffer/client/gpu_memory_buffer_tracker.cc b/gpu/command_buffer/client/gpu_memory_buffer_tracker.cc
new file mode 100644
index 0000000..9ffe0e3
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_memory_buffer_tracker.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/gpu_control.h"
+
+namespace gpu {
+namespace gles2 {
+
+GpuMemoryBufferTracker::GpuMemoryBufferTracker(GpuControl* gpu_control)
+ : gpu_control_(gpu_control) {
+}
+
+GpuMemoryBufferTracker::~GpuMemoryBufferTracker() {
+ while (!buffers_.empty()) {
+ RemoveBuffer(buffers_.begin()->first);
+ }
+}
+
+int32 GpuMemoryBufferTracker::CreateBuffer(size_t width,
+ size_t height,
+ int32 internalformat,
+ int32 usage) {
+ int32 image_id = 0;
+ DCHECK(gpu_control_);
+ gfx::GpuMemoryBuffer* buffer = gpu_control_->CreateGpuMemoryBuffer(
+ width, height, internalformat, usage, &image_id);
+ if (!buffer)
+ return 0;
+
+ std::pair<BufferMap::iterator, bool> result =
+ buffers_.insert(std::make_pair(image_id, buffer));
+ DCHECK(result.second);
+
+ return image_id;
+}
+
+gfx::GpuMemoryBuffer* GpuMemoryBufferTracker::GetBuffer(int32 image_id) {
+ BufferMap::iterator it = buffers_.find(image_id);
+ return (it != buffers_.end()) ? it->second : NULL;
+}
+
+void GpuMemoryBufferTracker::RemoveBuffer(int32 image_id) {
+ BufferMap::iterator buffer_it = buffers_.find(image_id);
+ if (buffer_it != buffers_.end())
+ buffers_.erase(buffer_it);
+ DCHECK(gpu_control_);
+ gpu_control_->DestroyGpuMemoryBuffer(image_id);
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/gpu_memory_buffer_tracker.h b/gpu/command_buffer/client/gpu_memory_buffer_tracker.h
new file mode 100644
index 0000000..25ec949
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_memory_buffer_tracker.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GPU_MEMORY_BUFFER_TRACKER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GPU_MEMORY_BUFFER_TRACKER_H_
+
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "gles2_impl_export.h"
+
+namespace gfx {
+class GpuMemoryBuffer;
+}
+
+namespace gpu {
+class GpuControl;
+
+namespace gles2 {
+
+// Tracks GPU memory buffer objects on the client side.
+class GLES2_IMPL_EXPORT GpuMemoryBufferTracker {
+ public:
+ explicit GpuMemoryBufferTracker(GpuControl* gpu_control);
+ virtual ~GpuMemoryBufferTracker();
+
+ int32 CreateBuffer(size_t width,
+ size_t height,
+ int32 internalformat,
+ int32 usage);
+ gfx::GpuMemoryBuffer* GetBuffer(int32 image_id);
+ void RemoveBuffer(int32 image_id);
+
+ private:
+ typedef base::hash_map<int32, gfx::GpuMemoryBuffer*> BufferMap;
+ BufferMap buffers_;
+ GpuControl* gpu_control_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuMemoryBufferTracker);
+};
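+
+// Illustrative sketch (not part of this patch) of typical tracker usage,
+// assuming |gpu_control| is a valid GpuControl* and that the GpuControl
+// implementation accepts the GL_RGBA8_OES / GL_IMAGE_MAP_CHROMIUM enums:
+//
+//   GpuMemoryBufferTracker tracker(gpu_control);
+//   int32 image_id = tracker.CreateBuffer(
+//       256, 256, GL_RGBA8_OES, GL_IMAGE_MAP_CHROMIUM);
+//   if (image_id) {
+//     gfx::GpuMemoryBuffer* buffer = tracker.GetBuffer(image_id);
+//     // ... map and fill |buffer| ...
+//     tracker.RemoveBuffer(image_id);
+//   }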
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GPU_MEMORY_BUFFER_TRACKER_H_
diff --git a/gpu/command_buffer/client/gpu_switches.cc b/gpu/command_buffer/client/gpu_switches.cc
new file mode 100644
index 0000000..f933c19
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_switches.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/gpu_switches.h"
+#include "base/basictypes.h"
+
+namespace switches {
+
+// Enable GPU client logging.
+const char kEnableGPUClientLogging[] = "enable-gpu-client-logging";
+
+} // namespace switches
diff --git a/gpu/command_buffer/client/gpu_switches.h b/gpu/command_buffer/client/gpu_switches.h
new file mode 100644
index 0000000..523e5a4
--- /dev/null
+++ b/gpu/command_buffer/client/gpu_switches.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the command-line switches used by gpu/command_buffer/client/.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_GPU_SWITCHES_H_
+#define GPU_COMMAND_BUFFER_CLIENT_GPU_SWITCHES_H_
+
+#include "gpu/command_buffer/client/gles2_impl_export.h"
+
+namespace switches {
+
+GLES2_IMPL_EXPORT extern const char kEnableGPUClientLogging[];
+
+} // namespace switches
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_GPU_SWITCHES_H_
+
diff --git a/gpu/command_buffer/client/mapped_memory.cc b/gpu/command_buffer/client/mapped_memory.cc
new file mode 100644
index 0000000..fc6ca5d
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mapped_memory.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+MemoryChunk::MemoryChunk(int32 shm_id,
+ scoped_refptr<gpu::Buffer> shm,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback)
+ : shm_id_(shm_id),
+ shm_(shm),
+ allocator_(shm->size(), helper, poll_callback, shm->memory()) {}
+
+MemoryChunk::~MemoryChunk() {}
+
+MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
+ size_t unused_memory_reclaim_limit)
+ : chunk_size_multiple_(1),
+ helper_(helper),
+ poll_callback_(poll_callback),
+ allocated_memory_(0),
+ max_free_bytes_(unused_memory_reclaim_limit) {
+}
+
+MappedMemoryManager::~MappedMemoryManager() {
+ CommandBuffer* cmd_buf = helper_->command_buffer();
+ for (MemoryChunkVector::iterator iter = chunks_.begin();
+ iter != chunks_.end(); ++iter) {
+ MemoryChunk* chunk = *iter;
+ cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+ }
+}
+
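+// Allocation strategy: first try to satisfy the request from an existing chunk
+// without waiting; if a free-memory limit is enforced and the memory not in
+// use already exceeds it, retry while waiting on pending tokens; otherwise
+// create a new chunk.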
+void* MappedMemoryManager::Alloc(
+ unsigned int size, int32* shm_id, unsigned int* shm_offset) {
+ DCHECK(shm_id);
+ DCHECK(shm_offset);
+ if (size <= allocated_memory_) {
+ size_t total_bytes_in_use = 0;
+ // See if any of the chunks can satisfy this request.
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ chunk->FreeUnused();
+ total_bytes_in_use += chunk->bytes_in_use();
+ if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
+
+ // If there is a memory limit being enforced and total free
+ // memory (allocated_memory_ - total_bytes_in_use) is larger than
+ // the limit, try waiting.
+ if (max_free_bytes_ != kNoLimit &&
+ (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
+ TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
+ }
+ }
+
+ // Make a new chunk to satisfy the request.
+ CommandBuffer* cmd_buf = helper_->command_buffer();
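+ // Round the requested size up to the next multiple of chunk_size_multiple_;
+ // e.g. a 1500-byte request with a 1024-byte multiple yields a 2048-byte
+ // chunk.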
+ unsigned int chunk_size =
+ ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
+ chunk_size_multiple_;
+ int32 id = -1;
+ scoped_refptr<gpu::Buffer> shm =
+ cmd_buf->CreateTransferBuffer(chunk_size, &id);
+ if (id < 0)
+ return NULL;
+ DCHECK(shm.get());
+ MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
+ allocated_memory_ += mc->GetSize();
+ chunks_.push_back(mc);
+ void* mem = mc->Alloc(size);
+ DCHECK(mem);
+ *shm_id = mc->shm_id();
+ *shm_offset = mc->GetOffset(mem);
+ return mem;
+}
+
+void MappedMemoryManager::Free(void* pointer) {
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->IsInChunk(pointer)) {
+ chunk->Free(pointer);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->IsInChunk(pointer)) {
+ chunk->FreePendingToken(pointer, token);
+ return;
+ }
+ }
+ NOTREACHED();
+}
+
+void MappedMemoryManager::FreeUnused() {
+ CommandBuffer* cmd_buf = helper_->command_buffer();
+ MemoryChunkVector::iterator iter = chunks_.begin();
+ while (iter != chunks_.end()) {
+ MemoryChunk* chunk = *iter;
+ chunk->FreeUnused();
+ if (!chunk->InUse()) {
+ cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+ allocated_memory_ -= chunk->GetSize();
+ iter = chunks_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/mapped_memory.h b/gpu/command_buffer/client/mapped_memory.h
new file mode 100644
index 0000000..789e69c
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory.h
@@ -0,0 +1,204 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/scoped_vector.h"
+#include "gpu/command_buffer/client/fenced_allocator.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+// Manages a shared memory segment.
+class GPU_EXPORT MemoryChunk {
+ public:
+ MemoryChunk(int32_t shm_id,
+ scoped_refptr<gpu::Buffer> shm,
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback);
+ ~MemoryChunk();
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSizeWithoutWaiting() {
+ return allocator_.GetLargestFreeSize();
+ }
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait.
+ unsigned int GetLargestFreeSizeWithWaiting() {
+ return allocator_.GetLargestFreeOrPendingSize();
+ }
+
+ // Gets the size of the chunk.
+ unsigned int GetSize() const {
+ return static_cast<unsigned int>(shm_->size());
+ }
+
+ // The shared memory id for this chunk.
+ int32_t shm_id() const {
+ return shm_id_;
+ }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ void* Alloc(unsigned int size) {
+ return allocator_.Alloc(size);
+ }
+
+ // Gets the offset of a memory block within the chunk, given its address.
+ // Translates NULL to FencedAllocator::kInvalidOffset.
+ unsigned int GetOffset(void* pointer) {
+ return allocator_.GetOffset(pointer);
+ }
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ void Free(void* pointer) {
+ allocator_.Free(pointer);
+ }
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void* pointer, unsigned int token) {
+ allocator_.FreePendingToken(pointer, token);
+ }
+
+ // Frees any blocks whose tokens have passed.
+ void FreeUnused() {
+ allocator_.FreeUnused();
+ }
+
+ // Returns true if pointer is in the range of this block.
+ bool IsInChunk(void* pointer) const {
+ return pointer >= shm_->memory() &&
+ pointer <
+ reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
+ }
+
+ // Returns true if any memory in this chunk is in use.
+ bool InUse() {
+ return allocator_.InUse();
+ }
+
+ size_t bytes_in_use() const {
+ return allocator_.bytes_in_use();
+ }
+
+ private:
+ int32_t shm_id_;
+ scoped_refptr<gpu::Buffer> shm_;
+ FencedAllocatorWrapper allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
+};
+
+// Manages MemoryChunks.
+class GPU_EXPORT MappedMemoryManager {
+ public:
+ enum MemoryLimit {
+ kNoLimit = 0,
+ };
+
+ // |unused_memory_reclaim_limit|: When exceeded this causes pending memory
+ // to be reclaimed before allocating more memory.
+ MappedMemoryManager(CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
+ size_t unused_memory_reclaim_limit);
+
+ ~MappedMemoryManager();
+
+ unsigned int chunk_size_multiple() const {
+ return chunk_size_multiple_;
+ }
+
+ void set_chunk_size_multiple(unsigned int multiple) {
+ chunk_size_multiple_ = multiple;
+ }
+
+ // Allocates a block of memory.
+ // Parameters:
+ // size: size of memory to allocate.
+ // shm_id: pointer to variable to receive the shared memory id.
+ // shm_offset: pointer to variable to receive the shared memory offset.
+ // Returns:
+ // pointer to allocated block of memory. NULL if failure.
+ void* Alloc(
+ unsigned int size, int32_t* shm_id, unsigned int* shm_offset);
+
+ // Frees a block of memory.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ void Free(void* pointer);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void* pointer, int32_t token);
+
+ // Frees any shared memory that is not in use.
+ void FreeUnused();
+
+ // Used for testing
+ size_t num_chunks() const {
+ return chunks_.size();
+ }
+
+ size_t bytes_in_use() const {
+ size_t bytes_in_use = 0;
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ bytes_in_use += chunk->bytes_in_use();
+ }
+ return bytes_in_use;
+ }
+
+ // Used for testing
+ size_t allocated_memory() const {
+ return allocated_memory_;
+ }
+
+ private:
+ typedef ScopedVector<MemoryChunk> MemoryChunkVector;
+
+ // Size that chunk allocations are rounded up to.
+ unsigned int chunk_size_multiple_;
+ CommandBufferHelper* helper_;
+ base::Closure poll_callback_;
+ MemoryChunkVector chunks_;
+ size_t allocated_memory_;
+ size_t max_free_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
+};
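+
+// Illustrative sketch (not part of this patch) of the usual allocate / free
+// pattern, assuming |helper| is an initialized CommandBufferHelper* and
+// NoOpPoll is a hypothetical no-op poll callback:
+//
+//   MappedMemoryManager manager(helper, base::Bind(&NoOpPoll),
+//                               MappedMemoryManager::kNoLimit);
+//   int32_t shm_id = -1;
+//   unsigned int shm_offset = 0;
+//   void* mem = manager.Alloc(1024, &shm_id, &shm_offset);
+//   if (mem) {
+//     // ... fill |mem| and issue a command that reads it, then:
+//     manager.FreePendingToken(mem, helper->InsertToken());
+//   }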
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
new file mode 100644
index 0000000..963640a
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -0,0 +1,456 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mapped_memory.h"
+
+#include <list>
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class MappedMemoryTestBase : public testing::Test {
+ protected:
+ static const unsigned int kBufferSize = 1024;
+
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock(true));
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kBufferSize);
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+};
+
+#ifndef _MSC_VER
+const unsigned int MappedMemoryTestBase::kBufferSize;
+#endif
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken calls are properly forwarded to the engine.
+class MemoryChunkTest : public MappedMemoryTestBase {
+ protected:
+ static const int32 kShmId = 123;
+ virtual void SetUp() {
+ MappedMemoryTestBase::SetUp();
+ scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
+ shared_memory->CreateAndMapAnonymous(kBufferSize);
+ buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
+ chunk_.reset(new MemoryChunk(kShmId,
+ buffer_,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ MappedMemoryTestBase::TearDown();
+ }
+
+ uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }
+
+ scoped_ptr<MemoryChunk> chunk_;
+ scoped_refptr<gpu::Buffer> buffer_;
+};
+
+#ifndef _MSC_VER
+const int32 MemoryChunkTest::kShmId;
+#endif
+
+TEST_F(MemoryChunkTest, Basic) {
+ const unsigned int kSize = 16;
+ EXPECT_EQ(kShmId, chunk_->shm_id());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetSize());
+ void *pointer = chunk_->Alloc(kSize);
+ ASSERT_TRUE(pointer);
+ EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
+ EXPECT_GE(kBufferSize,
+ static_cast<uint8*>(pointer) - buffer_memory() + kSize);
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetSize());
+
+ chunk_->Free(pointer);
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+
+ uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
+ ASSERT_TRUE(pointer_char);
+ EXPECT_LE(buffer_memory(), pointer_char);
+ EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
+ chunk_->Free(pointer_char);
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+ EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+}
+
+class MappedMemoryManagerTest : public MappedMemoryTestBase {
+ public:
+ MappedMemoryManager* manager() const {
+ return manager_.get();
+ }
+
+ protected:
+ virtual void SetUp() {
+ MappedMemoryTestBase::SetUp();
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+ manager_.reset();
+ MappedMemoryTestBase::TearDown();
+ }
+
+ scoped_ptr<MappedMemoryManager> manager_;
+};
+
+TEST_F(MappedMemoryManagerTest, Basic) {
+ const unsigned int kSize = 1024;
+ // Check we can alloc.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+ // Check that if we free and realloc the same size we get the same memory.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ manager_->Free(mem1);
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ EXPECT_EQ(mem1, mem2);
+ EXPECT_EQ(id1, id2);
+ EXPECT_EQ(offset1, offset2);
+ // Check that if we allocate again we get different shared memory.
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem3 != NULL);
+ EXPECT_NE(mem2, mem3);
+ EXPECT_NE(id2, id3);
+ EXPECT_EQ(0u, offset3);
+ // Free 3 and allocate 2 half size blocks.
+ manager_->Free(mem3);
+ int32 id4 = -1;
+ int32 id5 = -1;
+ unsigned int offset4 = 0xFFFFFFFFU;
+ unsigned int offset5 = 0xFFFFFFFFU;
+ void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
+ void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
+ ASSERT_TRUE(mem4 != NULL);
+ ASSERT_TRUE(mem5 != NULL);
+ EXPECT_EQ(id3, id4);
+ EXPECT_EQ(id4, id5);
+ EXPECT_EQ(0u, offset4);
+ EXPECT_EQ(kSize / 2u, offset5);
+ manager_->Free(mem4);
+ manager_->Free(mem2);
+ manager_->Free(mem5);
+}
+
+TEST_F(MappedMemoryManagerTest, FreePendingToken) {
+ const unsigned int kSize = 128;
+ const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
+ CHECK(kAllocCount * kSize == kBufferSize * 2);
+
+ // Allocate several buffers across multiple chunks.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ int32 id = -1;
+ unsigned int offset = 0xFFFFFFFFu;
+ pointers[i] = manager_->Alloc(kSize, &id, &offset);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_NE(id, -1);
+ EXPECT_NE(offset, 0xFFFFFFFFu);
+ }
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ manager_->FreePendingToken(pointers[0], token);
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+ // Force it to read up to the token
+ helper_->Finish();
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, GetToken());
+
+ // This allocation should use the spot just freed above.
+ int32 new_id = -1;
+ unsigned int new_offset = 0xFFFFFFFFu;
+ void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
+ EXPECT_TRUE(new_ptr);
+ EXPECT_EQ(new_ptr, pointers[0]);
+ EXPECT_NE(new_id, -1);
+ EXPECT_NE(new_offset, 0xFFFFFFFFu);
+
+ // Free up everything.
+ manager_->Free(new_ptr);
+ for (unsigned int i = 1; i < kAllocCount; ++i) {
+ manager_->Free(pointers[i]);
+ }
+}
+
+TEST_F(MappedMemoryManagerTest, FreeUnused) {
+ int32 id = -1;
+ unsigned int offset = 0xFFFFFFFFU;
+ void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
+ void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
+ ASSERT_TRUE(m1 != NULL);
+ ASSERT_TRUE(m2 != NULL);
+ EXPECT_EQ(2u, manager_->num_chunks());
+ manager_->FreeUnused();
+ EXPECT_EQ(2u, manager_->num_chunks());
+ manager_->Free(m2);
+ EXPECT_EQ(2u, manager_->num_chunks());
+ manager_->FreeUnused();
+ EXPECT_EQ(1u, manager_->num_chunks());
+ manager_->Free(m1);
+ EXPECT_EQ(1u, manager_->num_chunks());
+ manager_->FreeUnused();
+ EXPECT_EQ(0u, manager_->num_chunks());
+}
+
+TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
+ const unsigned int kSize = 1024;
+ manager_->set_chunk_size_multiple(kSize * 2);
+ // Check that if we allocate less than the chunk size multiple, the
+ // chunk size gets rounded up.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem1);
+ ASSERT_TRUE(mem2);
+ ASSERT_TRUE(mem3);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(id1, id2);
+ EXPECT_NE(id2, id3);
+ EXPECT_EQ(0u, offset1);
+ EXPECT_EQ(kSize, offset2);
+ EXPECT_EQ(0u, offset3);
+
+ manager_->Free(mem1);
+ manager_->Free(mem2);
+ manager_->Free(mem3);
+}
+
+TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
+ const unsigned int kChunkSize = 2048;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
+ manager_->set_chunk_size_multiple(kChunkSize);
+
+ // Allocate one chunk worth of memory.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+
+ // Allocate another full chunk worth of memory. The first chunk is fully
+ // in use, so a second chunk will be allocated.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
+ ASSERT_TRUE(mem2);
+ EXPECT_NE(-1, id2);
+ EXPECT_EQ(0u, offset2);
+
+ // Expect two chunks to be allocated, exceeding the limit,
+ // since all memory is in use.
+ EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
+
+ manager_->Free(mem1);
+ manager_->Free(mem2);
+}
+
+TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kSize));
+ const unsigned int kChunkSize = 2 * 1024;
+ manager_->set_chunk_size_multiple(kChunkSize);
+
+ // Allocate half a chunk worth of memory.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+
+ // Allocate half a chunk worth of memory again.
+ // The same chunk will be used.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ ASSERT_TRUE(mem2);
+ EXPECT_NE(-1, id2);
+ EXPECT_EQ(kSize, offset2);
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ manager_->FreePendingToken(mem2, token);
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something, which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // Since we didn't call helper_->Finish(), the token has not passed.
+ // We won't be able to claim the freed memory without waiting, and
+ // as we've already met the memory limit we'll have to wait
+ // on the token.
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem3);
+ EXPECT_NE(-1, id3);
+ // It will reuse the space from the second allocation just freed.
+ EXPECT_EQ(kSize, offset3);
+
+ // Expect one chunk to be allocated
+ EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
+
+ manager_->Free(mem1);
+ manager_->Free(mem3);
+}
+
+namespace {
+void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
+ std::list<void*>::iterator it = list->begin();
+ while (it != list->end()) {
+ void* address = *it;
+ test->manager()->Free(address);
+ it = list->erase(it);
+ }
+}
+}
+
+TEST_F(MappedMemoryManagerTest, Poll) {
+ std::list<void*> unmanaged_memory_list;
+
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(),
+ base::Bind(&Poll, this, &unmanaged_memory_list),
+ kSize));
+
+ // Allocate kSize bytes. Don't add the address to
+ // the unmanaged memory list, so that it won't be freed just yet.
+ int32 id1;
+ unsigned int offset1;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize);
+
+ // Allocate kSize more bytes, and make sure we grew.
+ int32 id2;
+ unsigned int offset2;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ // Make the unmanaged buffer be released next time FreeUnused() is called
+ // in MappedMemoryManager/FencedAllocator. This happens for example when
+ // allocating new memory.
+ unmanaged_memory_list.push_back(mem1);
+
+ // Allocate kSize more bytes. This should poll unmanaged memory, which now
+ // should free the previously allocated unmanaged memory.
+ int32 id3;
+ unsigned int offset3;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ manager_->Free(mem2);
+ manager_->Free(mem3);
+ EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/program_info_manager.cc b/gpu/command_buffer/client/program_info_manager.cc
new file mode 100644
index 0000000..d854aa0
--- /dev/null
+++ b/gpu/command_buffer/client/program_info_manager.cc
@@ -0,0 +1,526 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/program_info_manager.h"
+
+#include <map>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+
+namespace gpu {
+namespace gles2 {
+
+class NonCachedProgramInfoManager : public ProgramInfoManager {
+ public:
+ NonCachedProgramInfoManager();
+ virtual ~NonCachedProgramInfoManager();
+
+ virtual void CreateInfo(GLuint program) OVERRIDE;
+
+ virtual void DeleteInfo(GLuint program) OVERRIDE;
+
+ virtual bool GetProgramiv(GLES2Implementation* gl,
+ GLuint program,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+ virtual GLint GetAttribLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual GLint GetUniformLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual bool GetActiveAttrib(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+ virtual bool GetActiveUniform(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+};
+
+NonCachedProgramInfoManager::NonCachedProgramInfoManager() {
+}
+
+NonCachedProgramInfoManager::~NonCachedProgramInfoManager() {
+}
+
+void NonCachedProgramInfoManager::CreateInfo(GLuint /* program */) {
+}
+
+void NonCachedProgramInfoManager::DeleteInfo(GLuint /* program */) {
+}
+
+bool NonCachedProgramInfoManager::GetProgramiv(
+ GLES2Implementation* /* gl */,
+ GLuint /* program */,
+ GLenum /* pname */,
+ GLint* /* params */) {
+ return false;
+}
+
+GLint NonCachedProgramInfoManager::GetAttribLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ return gl->GetAttribLocationHelper(program, name);
+}
+
+GLint NonCachedProgramInfoManager::GetUniformLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ return gl->GetUniformLocationHelper(program, name);
+}
+
+bool NonCachedProgramInfoManager::GetActiveAttrib(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ return gl->GetActiveAttribHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+bool NonCachedProgramInfoManager::GetActiveUniform(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ return gl->GetActiveUniformHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+class CachedProgramInfoManager : public ProgramInfoManager {
+ public:
+ CachedProgramInfoManager();
+ virtual ~CachedProgramInfoManager();
+
+ virtual void CreateInfo(GLuint program) OVERRIDE;
+
+ virtual void DeleteInfo(GLuint program) OVERRIDE;
+
+ virtual bool GetProgramiv(GLES2Implementation* gl,
+ GLuint program,
+ GLenum pname,
+ GLint* params) OVERRIDE;
+
+ virtual GLint GetAttribLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual GLint GetUniformLocation(GLES2Implementation* gl,
+ GLuint program,
+ const char* name) OVERRIDE;
+
+ virtual bool GetActiveAttrib(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+ virtual bool GetActiveUniform(GLES2Implementation* gl,
+ GLuint program,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLint* size,
+ GLenum* type,
+ char* name) OVERRIDE;
+
+ private:
+ class Program {
+ public:
+ struct UniformInfo {
+ UniformInfo(GLsizei _size, GLenum _type, const std::string& _name);
+
+ GLsizei size;
+ GLenum type;
+ bool is_array;
+ std::string name;
+ std::vector<GLint> element_locations;
+ };
+ struct VertexAttrib {
+ VertexAttrib(GLsizei _size, GLenum _type, const std::string& _name,
+ GLint _location)
+ : size(_size),
+ type(_type),
+ location(_location),
+ name(_name) {
+ }
+ GLsizei size;
+ GLenum type;
+ GLint location;
+ std::string name;
+ };
+
+ typedef std::vector<UniformInfo> UniformInfoVector;
+ typedef std::vector<VertexAttrib> AttribInfoVector;
+
+ Program();
+
+ const AttribInfoVector& GetAttribInfos() const {
+ return attrib_infos_;
+ }
+
+ const VertexAttrib* GetAttribInfo(GLint index) const {
+ return (static_cast<size_t>(index) < attrib_infos_.size()) ?
+ &attrib_infos_[index] : NULL;
+ }
+
+ GLint GetAttribLocation(const std::string& name) const;
+
+ const UniformInfo* GetUniformInfo(GLint index) const {
+ return (static_cast<size_t>(index) < uniform_infos_.size()) ?
+ &uniform_infos_[index] : NULL;
+ }
+
+ // Gets the location of a uniform by name.
+ GLint GetUniformLocation(const std::string& name) const;
+
+ bool GetProgramiv(GLenum pname, GLint* params);
+
+ // Updates the program info after a successful link.
+ void Update(GLES2Implementation* gl, GLuint program);
+
+ private:
+ bool cached_;
+
+ GLsizei max_attrib_name_length_;
+
+ // Attrib info by index.
+ AttribInfoVector attrib_infos_;
+
+ GLsizei max_uniform_name_length_;
+
+ // Uniform info by index.
+ UniformInfoVector uniform_infos_;
+
+ // This is true if glLinkProgram was successful last time it was called.
+ bool link_status_;
+ };
+
+ Program* GetProgramInfo(GLES2Implementation* gl, GLuint program);
+
+ // TODO(gman): Switch to a faster container.
+ typedef std::map<GLuint, Program> ProgramInfoMap;
+
+ ProgramInfoMap program_infos_;
+
+ mutable base::Lock lock_;
+};
+
+CachedProgramInfoManager::Program::UniformInfo::UniformInfo(
+ GLsizei _size, GLenum _type, const std::string& _name)
+ : size(_size),
+ type(_type),
+ name(_name) {
+ is_array = (!name.empty() && name[name.size() - 1] == ']');
+ DCHECK(!(size > 1 && !is_array));
+}
+
+CachedProgramInfoManager::Program::Program()
+ : cached_(false),
+ max_attrib_name_length_(0),
+ max_uniform_name_length_(0),
+ link_status_(false) {
+}
+
+// TODO(gman): Add a faster lookup.
+GLint CachedProgramInfoManager::Program::GetAttribLocation(
+ const std::string& name) const {
+ for (GLuint ii = 0; ii < attrib_infos_.size(); ++ii) {
+ const VertexAttrib& info = attrib_infos_[ii];
+ if (info.name == name) {
+ return info.location;
+ }
+ }
+ return -1;
+}
+
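+// Resolves a uniform name to a location, including array element syntax. For
+// example, for "uniform vec4 lights[4];" the cached name is "lights[0]";
+// looking up "lights" or "lights[0]" returns element_locations[0], while
+// "lights[2]" returns element_locations[2].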
+GLint CachedProgramInfoManager::Program::GetUniformLocation(
+ const std::string& name) const {
+ bool getting_array_location = false;
+ size_t open_pos = std::string::npos;
+ int index = 0;
+ if (!GLES2Util::ParseUniformName(
+ name, &open_pos, &index, &getting_array_location)) {
+ return -1;
+ }
+ for (GLuint ii = 0; ii < uniform_infos_.size(); ++ii) {
+ const UniformInfo& info = uniform_infos_[ii];
+ if (info.name == name ||
+ (info.is_array &&
+ info.name.compare(0, info.name.size() - 3, name) == 0)) {
+ return info.element_locations[0];
+ } else if (getting_array_location && info.is_array) {
+ // Look for an array specification.
+ size_t open_pos_2 = info.name.find_last_of('[');
+ if (open_pos_2 == open_pos &&
+ name.compare(0, open_pos, info.name, 0, open_pos) == 0) {
+ if (index >= 0 && index < info.size) {
+ return info.element_locations[index];
+ }
+ }
+ }
+ }
+ return -1;
+}
+
+bool CachedProgramInfoManager::Program::GetProgramiv(
+ GLenum pname, GLint* params) {
+ switch (pname) {
+ case GL_LINK_STATUS:
+ *params = link_status_;
+ return true;
+ case GL_ACTIVE_ATTRIBUTES:
+ *params = attrib_infos_.size();
+ return true;
+ case GL_ACTIVE_ATTRIBUTE_MAX_LENGTH:
+ *params = max_attrib_name_length_;
+ return true;
+ case GL_ACTIVE_UNIFORMS:
+ *params = uniform_infos_.size();
+ return true;
+ case GL_ACTIVE_UNIFORM_MAX_LENGTH:
+ *params = max_uniform_name_length_;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
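+// Reinterprets a byte range inside |data| as a pointer of type T. Returns NULL
+// (after hitting a NOTREACHED assertion) if the requested range extends past
+// the end of the buffer.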
+template<typename T> static T LocalGetAs(
+ const std::vector<int8>& data, uint32 offset, size_t size) {
+ const int8* p = &data[0] + offset;
+ if (offset + size > data.size()) {
+ NOTREACHED();
+ return NULL;
+ }
+ return static_cast<T>(static_cast<const void*>(p));
+}
+
+void CachedProgramInfoManager::Program::Update(
+ GLES2Implementation* gl, GLuint program) {
+ if (cached_) {
+ return;
+ }
+ std::vector<int8> result;
+ gl->GetProgramInfoCHROMIUMHelper(program, &result);
+ if (result.empty()) {
+ // This should only happen on a lost context.
+ return;
+ }
+ DCHECK_GE(result.size(), sizeof(ProgramInfoHeader));
+ const ProgramInfoHeader* header = LocalGetAs<const ProgramInfoHeader*>(
+ result, 0, sizeof(ProgramInfoHeader));
+ link_status_ = header->link_status != 0;
+ if (!link_status_) {
+ return;
+ }
+ attrib_infos_.clear();
+ uniform_infos_.clear();
+ max_attrib_name_length_ = 0;
+ max_uniform_name_length_ = 0;
+ const ProgramInput* inputs = LocalGetAs<const ProgramInput*>(
+ result, sizeof(*header),
+ sizeof(ProgramInput) * (header->num_attribs + header->num_uniforms));
+ const ProgramInput* input = inputs;
+ for (uint32 ii = 0; ii < header->num_attribs; ++ii) {
+ const int32* location = LocalGetAs<const int32*>(
+ result, input->location_offset, sizeof(int32));
+ const char* name_buf = LocalGetAs<const char*>(
+ result, input->name_offset, input->name_length);
+ std::string name(name_buf, input->name_length);
+ attrib_infos_.push_back(
+ VertexAttrib(input->size, input->type, name, *location));
+ max_attrib_name_length_ = std::max(
+ static_cast<GLsizei>(name.size() + 1), max_attrib_name_length_);
+ ++input;
+ }
+ for (uint32 ii = 0; ii < header->num_uniforms; ++ii) {
+ const int32* locations = LocalGetAs<const int32*>(
+ result, input->location_offset, sizeof(int32) * input->size);
+ const char* name_buf = LocalGetAs<const char*>(
+ result, input->name_offset, input->name_length);
+ std::string name(name_buf, input->name_length);
+ UniformInfo info(input->size, input->type, name);
+ max_uniform_name_length_ = std::max(
+ static_cast<GLsizei>(name.size() + 1), max_uniform_name_length_);
+ for (int32 jj = 0; jj < input->size; ++jj) {
+ info.element_locations.push_back(locations[jj]);
+ }
+ uniform_infos_.push_back(info);
+ ++input;
+ }
+ DCHECK_EQ(header->num_attribs + header->num_uniforms,
+ static_cast<uint32>(input - inputs));
+ cached_ = true;
+}
+
+CachedProgramInfoManager::CachedProgramInfoManager() {
+}
+
+CachedProgramInfoManager::~CachedProgramInfoManager() {
+}
+
+CachedProgramInfoManager::Program*
+ CachedProgramInfoManager::GetProgramInfo(
+ GLES2Implementation* gl, GLuint program) {
+ lock_.AssertAcquired();
+ ProgramInfoMap::iterator it = program_infos_.find(program);
+ if (it == program_infos_.end()) {
+ return NULL;
+ }
+ Program* info = &it->second;
+ info->Update(gl, program);
+ return info;
+}
+
+void CachedProgramInfoManager::CreateInfo(GLuint program) {
+ base::AutoLock auto_lock(lock_);
+ program_infos_.erase(program);
+ std::pair<ProgramInfoMap::iterator, bool> result =
+ program_infos_.insert(std::make_pair(program, Program()));
+
+ DCHECK(result.second);
+}
+
+void CachedProgramInfoManager::DeleteInfo(GLuint program) {
+ base::AutoLock auto_lock(lock_);
+ program_infos_.erase(program);
+}
+
+bool CachedProgramInfoManager::GetProgramiv(
+ GLES2Implementation* gl, GLuint program, GLenum pname, GLint* params) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (!info) {
+ return false;
+ }
+ return info->GetProgramiv(pname, params);
+}
+
+GLint CachedProgramInfoManager::GetAttribLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ return info->GetAttribLocation(name);
+ }
+ return gl->GetAttribLocationHelper(program, name);
+}
+
+GLint CachedProgramInfoManager::GetUniformLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ return info->GetUniformLocation(name);
+ }
+ return gl->GetUniformLocationHelper(program, name);
+}
+
+bool CachedProgramInfoManager::GetActiveAttrib(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ const Program::VertexAttrib* attrib_info =
+ info->GetAttribInfo(index);
+ if (attrib_info) {
+ if (size) {
+ *size = attrib_info->size;
+ }
+ if (type) {
+ *type = attrib_info->type;
+ }
+ if (length || name) {
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ attrib_info->name.size()));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, attrib_info->name.c_str(), max_size);
+ name[max_size] = '\0';
+ }
+ }
+ return true;
+ }
+ }
+ return gl->GetActiveAttribHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+bool CachedProgramInfoManager::GetActiveUniform(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) {
+ base::AutoLock auto_lock(lock_);
+ Program* info = GetProgramInfo(gl, program);
+ if (info) {
+ const Program::UniformInfo* uniform_info = info->GetUniformInfo(index);
+ if (uniform_info) {
+ if (size) {
+ *size = uniform_info->size;
+ }
+ if (type) {
+ *type = uniform_info->type;
+ }
+ if (length || name) {
+ GLsizei max_size = std::min(static_cast<size_t>(bufsize) - 1,
+ std::max(static_cast<size_t>(0),
+ uniform_info->name.size()));
+ if (length) {
+ *length = max_size;
+ }
+ if (name && bufsize > 0) {
+ memcpy(name, uniform_info->name.c_str(), max_size);
+ name[max_size] = '\0';
+ }
+ }
+ return true;
+ }
+ }
+ return gl->GetActiveUniformHelper(
+ program, index, bufsize, length, size, type, name);
+}
+
+ProgramInfoManager::ProgramInfoManager() {
+}
+
+ProgramInfoManager::~ProgramInfoManager() {
+}
+
+ProgramInfoManager* ProgramInfoManager::Create(
+ bool shared_resources_across_processes) {
+ if (shared_resources_across_processes) {
+ return new NonCachedProgramInfoManager();
+ } else {
+ return new CachedProgramInfoManager();
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
+
diff --git a/gpu/command_buffer/client/program_info_manager.h b/gpu/command_buffer/client/program_info_manager.h
new file mode 100644
index 0000000..099f182
--- /dev/null
+++ b/gpu/command_buffer/client/program_info_manager.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_PROGRAM_INFO_MANAGER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_PROGRAM_INFO_MANAGER_H_
+
+#include <GLES2/gl2.h>
+#include "gles2_impl_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Implementation;
+
+// Manages info about OpenGL ES Programs.
+class GLES2_IMPL_EXPORT ProgramInfoManager {
+ public:
+ virtual ~ProgramInfoManager();
+
+ static ProgramInfoManager* Create(bool shared_resources_across_processes);
+
+ virtual void CreateInfo(GLuint program) = 0;
+
+ virtual void DeleteInfo(GLuint program) = 0;
+
+ virtual bool GetProgramiv(
+ GLES2Implementation* gl, GLuint program, GLenum pname, GLint* params) = 0;
+
+ virtual GLint GetAttribLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) = 0;
+
+ virtual GLint GetUniformLocation(
+ GLES2Implementation* gl, GLuint program, const char* name) = 0;
+
+ virtual bool GetActiveAttrib(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) = 0;
+
+ virtual bool GetActiveUniform(
+ GLES2Implementation* gl,
+ GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
+ GLint* size, GLenum* type, char* name) = 0;
+
+ protected:
+ ProgramInfoManager();
+};
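+
+// Illustrative sketch (not part of this patch) of how a caller is expected to
+// consult the manager and fall back to the service when nothing is cached.
+// |gl| and |program| are assumed to be a valid GLES2Implementation* and a
+// linked program id:
+//
+//   scoped_ptr<ProgramInfoManager> manager(ProgramInfoManager::Create(false));
+//   manager->CreateInfo(program);
+//   GLint link_status = 0;
+//   if (!manager->GetProgramiv(gl, program, GL_LINK_STATUS, &link_status)) {
+//     // Not cached; issue a real glGetProgramiv call to the service instead.
+//   }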
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_PROGRAM_INFO_MANAGER_H_
diff --git a/gpu/command_buffer/client/program_info_manager_unittest.cc b/gpu/command_buffer/client/program_info_manager_unittest.cc
new file mode 100644
index 0000000..e5002fd
--- /dev/null
+++ b/gpu/command_buffer/client/program_info_manager_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the ProgramInfoManager.
+
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+namespace gles2 {
+
+class ProgramInfoManagerTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ }
+
+ virtual void TearDown() {
+ }
+
+ scoped_ptr<ProgramInfoManager> program_info_manager_;
+};
+
+TEST_F(ProgramInfoManagerTest, Basic) {
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/query_tracker.cc b/gpu/command_buffer/client/query_tracker.cc
new file mode 100644
index 0000000..c12d975
--- /dev/null
+++ b/gpu/command_buffer/client/query_tracker.cc
@@ -0,0 +1,261 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/query_tracker.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES2/gl2extchromium.h>
+
+#include "base/atomicops.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/common/time.h"
+
+namespace gpu {
+namespace gles2 {
+
+QuerySyncManager::QuerySyncManager(MappedMemoryManager* manager)
+ : mapped_memory_(manager) {
+ DCHECK(manager);
+}
+
+QuerySyncManager::~QuerySyncManager() {
+ while (!buckets_.empty()) {
+ mapped_memory_->Free(buckets_.front()->syncs);
+ delete buckets_.front();
+ buckets_.pop_front();
+ }
+}
+
+bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) {
+ DCHECK(info);
+ if (free_queries_.empty()) {
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(
+ kSyncsPerBucket * sizeof(QuerySync), &shm_id, &shm_offset);
+ if (!mem) {
+ return false;
+ }
+ QuerySync* syncs = static_cast<QuerySync*>(mem);
+ Bucket* bucket = new Bucket(syncs);
+ buckets_.push_back(bucket);
+ for (size_t ii = 0; ii < kSyncsPerBucket; ++ii) {
+ free_queries_.push_back(QueryInfo(bucket, shm_id, shm_offset, syncs));
+ ++syncs;
+ shm_offset += sizeof(*syncs);
+ }
+ }
+ *info = free_queries_.front();
+ ++(info->bucket->used_query_count);
+ info->sync->Reset();
+ free_queries_.pop_front();
+ return true;
+}
+
+void QuerySyncManager::Free(const QuerySyncManager::QueryInfo& info) {
+ DCHECK_GT(info.bucket->used_query_count, 0u);
+ --(info.bucket->used_query_count);
+ free_queries_.push_back(info);
+}
+
+void QuerySyncManager::Shrink() {
+ std::deque<QueryInfo> new_queue;
+ while (!free_queries_.empty()) {
+ if (free_queries_.front().bucket->used_query_count)
+ new_queue.push_back(free_queries_.front());
+ free_queries_.pop_front();
+ }
+ free_queries_.swap(new_queue);
+
+ std::deque<Bucket*> new_buckets;
+ while (!buckets_.empty()) {
+ Bucket* bucket = buckets_.front();
+ if (bucket->used_query_count) {
+ new_buckets.push_back(bucket);
+ } else {
+ mapped_memory_->Free(bucket->syncs);
+ delete bucket;
+ }
+ buckets_.pop_front();
+ }
+ buckets_.swap(new_buckets);
+}
+
+QueryTracker::Query::Query(GLuint id, GLenum target,
+ const QuerySyncManager::QueryInfo& info)
+ : id_(id),
+ target_(target),
+ info_(info),
+ state_(kUninitialized),
+ submit_count_(0),
+ token_(0),
+ flush_count_(0),
+ client_begin_time_us_(0),
+ result_(0) {
+ }
+
+
+void QueryTracker::Query::Begin(GLES2Implementation* gl) {
+ // init memory, inc count
+ MarkAsActive();
+
+ switch (target()) {
+ case GL_GET_ERROR_QUERY_CHROMIUM:
+      // Do nothing on begin for error queries.
+ break;
+ case GL_LATENCY_QUERY_CHROMIUM:
+ client_begin_time_us_ = MicrosecondsSinceOriginOfTime();
+ // tell service about id, shared memory and count
+ gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ break;
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ default:
+ // tell service about id, shared memory and count
+ gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ break;
+ }
+}
+
+void QueryTracker::Query::End(GLES2Implementation* gl) {
+ switch (target()) {
+ case GL_GET_ERROR_QUERY_CHROMIUM: {
+ GLenum error = gl->GetClientSideGLError();
+ if (error == GL_NO_ERROR) {
+ // There was no error so start the query on the service.
+ // it will end immediately.
+ gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
+ } else {
+ // There's an error on the client, no need to bother the service. Just
+ // set the query as completed and return the error.
+ if (error != GL_NO_ERROR) {
+ state_ = kComplete;
+ result_ = error;
+ return;
+ }
+ }
+ }
+ }
+ flush_count_ = gl->helper()->flush_generation();
+ gl->helper()->EndQueryEXT(target(), submit_count());
+ MarkAsPending(gl->helper()->InsertToken());
+}
+
+bool QueryTracker::Query::CheckResultsAvailable(
+ CommandBufferHelper* helper) {
+ if (Pending()) {
+ if (base::subtle::Acquire_Load(&info_.sync->process_count) ==
+ submit_count_ ||
+ helper->IsContextLost()) {
+ switch (target()) {
+ case GL_COMMANDS_ISSUED_CHROMIUM:
+ result_ = std::min(info_.sync->result,
+ static_cast<uint64>(0xFFFFFFFFL));
+ break;
+ case GL_LATENCY_QUERY_CHROMIUM:
+ // Disabled DCHECK because of http://crbug.com/419236.
+ //DCHECK(info_.sync->result >= client_begin_time_us_);
+ result_ = std::min(info_.sync->result - client_begin_time_us_,
+ static_cast<uint64>(0xFFFFFFFFL));
+ break;
+ case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
+ case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
+ default:
+ result_ = info_.sync->result;
+ break;
+ }
+ state_ = kComplete;
+ } else {
+ if ((helper->flush_generation() - flush_count_ - 1) >= 0x80000000) {
+ helper->Flush();
+ } else {
+ // Insert no-ops so that eventually the GPU process will see more work.
+ helper->Noop(1);
+ }
+ }
+ }
+ return state_ == kComplete;
+}
+
+uint32 QueryTracker::Query::GetResult() const {
+ DCHECK(state_ == kComplete || state_ == kUninitialized);
+ return result_;
+}
+
+QueryTracker::QueryTracker(MappedMemoryManager* manager)
+ : query_sync_manager_(manager) {
+}
+
+QueryTracker::~QueryTracker() {
+ while (!queries_.empty()) {
+ delete queries_.begin()->second;
+ queries_.erase(queries_.begin());
+ }
+ while (!removed_queries_.empty()) {
+ delete removed_queries_.front();
+ removed_queries_.pop_front();
+ }
+}
+
+QueryTracker::Query* QueryTracker::CreateQuery(GLuint id, GLenum target) {
+ DCHECK_NE(0u, id);
+ FreeCompletedQueries();
+ QuerySyncManager::QueryInfo info;
+ if (!query_sync_manager_.Alloc(&info)) {
+ return NULL;
+ }
+ Query* query = new Query(id, target, info);
+ std::pair<QueryMap::iterator, bool> result =
+ queries_.insert(std::make_pair(id, query));
+ DCHECK(result.second);
+ return query;
+}
+
+QueryTracker::Query* QueryTracker::GetQuery(
+ GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ return it != queries_.end() ? it->second : NULL;
+}
+
+void QueryTracker::RemoveQuery(GLuint client_id) {
+ QueryMap::iterator it = queries_.find(client_id);
+ if (it != queries_.end()) {
+ Query* query = it->second;
+    // When a query is deleted, its shared memory can't be marked as unused
+    // until the query has completed.
+    // Note: Skipping this would not confuse the service, but it would corrupt
+    // the client's own bookkeeping.
+ removed_queries_.push_back(query);
+ queries_.erase(it);
+ FreeCompletedQueries();
+ }
+}
+
+void QueryTracker::Shrink() {
+ FreeCompletedQueries();
+ query_sync_manager_.Shrink();
+}
+
+void QueryTracker::FreeCompletedQueries() {
+ QueryList::iterator it = removed_queries_.begin();
+ while (it != removed_queries_.end()) {
+ Query* query = *it;
+ if (query->Pending() &&
+ base::subtle::Acquire_Load(&query->info_.sync->process_count) !=
+ query->submit_count()) {
+ ++it;
+ continue;
+ }
+
+ query_sync_manager_.Free(query->info_);
+ it = removed_queries_.erase(it);
+ delete query;
+ }
+}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/query_tracker.h b/gpu/command_buffer/client/query_tracker.h
new file mode 100644
index 0000000..72e29e7
--- /dev/null
+++ b/gpu/command_buffer/client/query_tracker.h
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_QUERY_TRACKER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_QUERY_TRACKER_H_
+
+#include <GLES2/gl2.h>
+
+#include <deque>
+#include <list>
+
+#include "base/atomicops.h"
+#include "base/containers/hash_tables.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+class MappedMemoryManager;
+
+namespace gles2 {
+
+class GLES2Implementation;
+
+// Manages buckets of QuerySync instances in mapped memory.
+class GLES2_IMPL_EXPORT QuerySyncManager {
+ public:
+ static const size_t kSyncsPerBucket = 4096;
+
+ struct Bucket {
+ explicit Bucket(QuerySync* sync_mem)
+ : syncs(sync_mem),
+ used_query_count(0) {
+ }
+ QuerySync* syncs;
+ unsigned used_query_count;
+ };
+ struct QueryInfo {
+ QueryInfo(Bucket* bucket, int32 id, uint32 offset, QuerySync* sync_mem)
+ : bucket(bucket),
+ shm_id(id),
+ shm_offset(offset),
+ sync(sync_mem) {
+ }
+
+ QueryInfo()
+ : bucket(NULL),
+ shm_id(0),
+ shm_offset(0),
+ sync(NULL) {
+ }
+
+ Bucket* bucket;
+ int32 shm_id;
+ uint32 shm_offset;
+ QuerySync* sync;
+ };
+
+ explicit QuerySyncManager(MappedMemoryManager* manager);
+ ~QuerySyncManager();
+
+ bool Alloc(QueryInfo* info);
+ void Free(const QueryInfo& sync);
+ void Shrink();
+
+ private:
+ MappedMemoryManager* mapped_memory_;
+ std::deque<Bucket*> buckets_;
+ std::deque<QueryInfo> free_queries_;
+
+ DISALLOW_COPY_AND_ASSIGN(QuerySyncManager);
+};
+
+// Tracks queries for client side of command buffer.
+class GLES2_IMPL_EXPORT QueryTracker {
+ public:
+ class GLES2_IMPL_EXPORT Query {
+ public:
+ enum State {
+ kUninitialized, // never used
+ kActive, // between begin - end
+ kPending, // not yet complete
+ kComplete // completed
+ };
+
+ Query(GLuint id, GLenum target, const QuerySyncManager::QueryInfo& info);
+
+ GLenum target() const {
+ return target_;
+ }
+
+    GLuint id() const {
+ return id_;
+ }
+
+ int32 shm_id() const {
+ return info_.shm_id;
+ }
+
+ uint32 shm_offset() const {
+ return info_.shm_offset;
+ }
+
+ void MarkAsActive() {
+ state_ = kActive;
+ ++submit_count_;
+ if (submit_count_ == INT_MAX)
+ submit_count_ = 1;
+ }
+
+ void MarkAsPending(int32 token) {
+ token_ = token;
+ state_ = kPending;
+ }
+
+ base::subtle::Atomic32 submit_count() const { return submit_count_; }
+
+ int32 token() const {
+ return token_;
+ }
+
+ bool NeverUsed() const {
+ return state_ == kUninitialized;
+ }
+
+ bool Pending() const {
+ return state_ == kPending;
+ }
+
+ bool CheckResultsAvailable(CommandBufferHelper* helper);
+
+ uint32 GetResult() const;
+
+ void Begin(GLES2Implementation* gl);
+ void End(GLES2Implementation* gl);
+
+ private:
+ friend class QueryTracker;
+ friend class QueryTrackerTest;
+
+ GLuint id_;
+ GLenum target_;
+ QuerySyncManager::QueryInfo info_;
+ State state_;
+ base::subtle::Atomic32 submit_count_;
+ int32 token_;
+ uint32 flush_count_;
+ uint64 client_begin_time_us_; // Only used for latency query target.
+ uint32 result_;
+ };
+
+ QueryTracker(MappedMemoryManager* manager);
+ ~QueryTracker();
+
+ Query* CreateQuery(GLuint id, GLenum target);
+ Query* GetQuery(GLuint id);
+ void RemoveQuery(GLuint id);
+ void Shrink();
+ void FreeCompletedQueries();
+
+ private:
+ typedef base::hash_map<GLuint, Query*> QueryMap;
+ typedef std::list<Query*> QueryList;
+
+ QueryMap queries_;
+ QueryList removed_queries_;
+ QuerySyncManager query_sync_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(QueryTracker);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_QUERY_TRACKER_H_
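Editor's note: the Query state machine above (kUninitialized -> kActive -> kPending -> kComplete) mirrors BeginQueryEXT/EndQueryEXT on the client side. A minimal sketch of the intended sequence, assuming an initialized GLES2Implementation gl and its MappedMemoryManager mapped_memory already exist; the polling loop is illustrative only:

    // Sketch only: `gl` and `mapped_memory` are assumed to exist already.
    gpu::gles2::QueryTracker tracker(mapped_memory);

    gpu::gles2::QueryTracker::Query* query =
        tracker.CreateQuery(1u, GL_COMMANDS_ISSUED_CHROMIUM);
    query->Begin(gl);   // kActive: increments the submit count.
    query->End(gl);     // kPending: issues EndQueryEXT and inserts a token.

    // Poll until the service writes a matching process_count into the
    // QuerySync block; CheckResultsAvailable flushes or inserts no-ops so the
    // GPU process keeps making progress.
    while (!query->CheckResultsAvailable(gl->helper())) {
    }
    uint32 result = query->GetResult();  // Valid once state_ is kComplete.
    tracker.RemoveQuery(1u);  // Sync memory is reclaimed after completion.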
diff --git a/gpu/command_buffer/client/query_tracker_unittest.cc b/gpu/command_buffer/client/query_tracker_unittest.cc
new file mode 100644
index 0000000..cd2ccf6
--- /dev/null
+++ b/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -0,0 +1,238 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for the QueryTracker.
+
+#include "gpu/command_buffer/client/query_tracker.h"
+
+#include <GLES2/gl2ext.h>
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace gpu {
+namespace gles2 {
+
+namespace {
+void EmptyPoll() {
+}
+}
+
+class QuerySyncManagerTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+
+ virtual void SetUp() {
+ command_buffer_.reset(new MockClientCommandBuffer());
+ helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ sync_manager_.reset(new QuerySyncManager(mapped_memory_.get()));
+ }
+
+ virtual void TearDown() {
+ sync_manager_.reset();
+ mapped_memory_.reset();
+ helper_.reset();
+ command_buffer_.reset();
+ }
+
+ scoped_ptr<CommandBuffer> command_buffer_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+ scoped_ptr<QuerySyncManager> sync_manager_;
+};
+
+TEST_F(QuerySyncManagerTest, Basic) {
+ QuerySyncManager::QueryInfo infos[4];
+ memset(&infos, 0xBD, sizeof(infos));
+
+ for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
+ EXPECT_NE(0, infos[ii].shm_id);
+ ASSERT_TRUE(infos[ii].sync != NULL);
+ EXPECT_EQ(0, infos[ii].sync->process_count);
+ EXPECT_EQ(0u, infos[ii].sync->result);
+ }
+
+ for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ sync_manager_->Free(infos[ii]);
+ }
+}
+
+TEST_F(QuerySyncManagerTest, DontFree) {
+ QuerySyncManager::QueryInfo infos[4];
+ memset(&infos, 0xBD, sizeof(infos));
+
+ for (size_t ii = 0; ii < arraysize(infos); ++ii) {
+ EXPECT_TRUE(sync_manager_->Alloc(&infos[ii]));
+ }
+}
+
+class QueryTrackerTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+
+ virtual void SetUp() {
+ command_buffer_.reset(new MockClientCommandBuffer());
+ helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
+ helper_->Initialize(kCommandBufferSizeBytes);
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+ query_tracker_.reset(new QueryTracker(mapped_memory_.get()));
+ }
+
+ virtual void TearDown() {
+ query_tracker_.reset();
+ mapped_memory_.reset();
+ helper_.reset();
+ command_buffer_.reset();
+ }
+
+ QuerySync* GetSync(QueryTracker::Query* query) {
+ return query->info_.sync;
+ }
+
+ QuerySyncManager::Bucket* GetBucket(QueryTracker::Query* query) {
+ return query->info_.bucket;
+ }
+
+ uint32 GetFlushGeneration() { return helper_->flush_generation(); }
+
+ scoped_ptr<CommandBuffer> command_buffer_;
+ scoped_ptr<GLES2CmdHelper> helper_;
+ scoped_ptr<MappedMemoryManager> mapped_memory_;
+ scoped_ptr<QueryTracker> query_tracker_;
+};
+
+TEST_F(QueryTrackerTest, Basic) {
+ const GLuint kId1 = 123;
+ const GLuint kId2 = 124;
+
+ // Check we can create a Query.
+ QueryTracker::Query* query = query_tracker_->CreateQuery(
+ kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+ // Check we can get the same Query.
+ EXPECT_EQ(query, query_tracker_->GetQuery(kId1));
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId2) == NULL);
+ // Check we can delete the query.
+ query_tracker_->RemoveQuery(kId1);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId1) == NULL);
+}
+
+TEST_F(QueryTrackerTest, Query) {
+ const GLuint kId1 = 123;
+ const int32 kToken = 46;
+ const uint32 kResult = 456;
+
+ // Create a Query.
+ QueryTracker::Query* query = query_tracker_->CreateQuery(
+ kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+ EXPECT_TRUE(query->NeverUsed());
+ EXPECT_FALSE(query->Pending());
+ EXPECT_EQ(0, query->token());
+ EXPECT_EQ(0, query->submit_count());
+
+ // Check MarkAsActive.
+ query->MarkAsActive();
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_FALSE(query->Pending());
+ EXPECT_EQ(0, query->token());
+ EXPECT_EQ(1, query->submit_count());
+
+ // Check MarkAsPending.
+ query->MarkAsPending(kToken);
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_TRUE(query->Pending());
+ EXPECT_EQ(kToken, query->token());
+ EXPECT_EQ(1, query->submit_count());
+
+ // Check CheckResultsAvailable.
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_TRUE(query->Pending());
+
+ // Flush only once if no more flushes happened between a call to
+ // EndQuery command and CheckResultsAvailable
+ // Advance put_ so flush calls in CheckResultsAvailable go through
+ // and updates flush_generation count
+ helper_->Noop(1);
+ // Set Query in pending state_ to simulate EndQuery command is called
+ query->MarkAsPending(kToken);
+ EXPECT_TRUE(query->Pending());
+ // Store FlushGeneration count after EndQuery is called
+ uint32 gen1 = GetFlushGeneration();
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ uint32 gen2 = GetFlushGeneration();
+ EXPECT_NE(gen1, gen2);
+ // Repeated calls to CheckResultsAvailable should not flush unnecessarily
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ gen1 = GetFlushGeneration();
+ EXPECT_EQ(gen1, gen2);
+ EXPECT_FALSE(query->CheckResultsAvailable(helper_.get()));
+ gen1 = GetFlushGeneration();
+ EXPECT_EQ(gen1, gen2);
+
+ // Simulate GPU process marking it as available.
+ QuerySync* sync = GetSync(query);
+ sync->process_count = query->submit_count();
+ sync->result = kResult;
+
+ // Check CheckResultsAvailable.
+ EXPECT_TRUE(query->CheckResultsAvailable(helper_.get()));
+ EXPECT_EQ(kResult, query->GetResult());
+ EXPECT_FALSE(query->NeverUsed());
+ EXPECT_FALSE(query->Pending());
+}
+
+TEST_F(QueryTrackerTest, Remove) {
+ const GLuint kId1 = 123;
+ const int32 kToken = 46;
+ const uint32 kResult = 456;
+
+ // Create a Query.
+ QueryTracker::Query* query = query_tracker_->CreateQuery(
+ kId1, GL_ANY_SAMPLES_PASSED_EXT);
+ ASSERT_TRUE(query != NULL);
+
+ QuerySyncManager::Bucket* bucket = GetBucket(query);
+ EXPECT_EQ(1u, bucket->used_query_count);
+
+ query->MarkAsActive();
+ query->MarkAsPending(kToken);
+
+ query_tracker_->RemoveQuery(kId1);
+ // Check we get nothing for a non-existent query.
+ EXPECT_TRUE(query_tracker_->GetQuery(kId1) == NULL);
+
+ // Check that memory was not freed.
+ EXPECT_EQ(1u, bucket->used_query_count);
+
+ // Simulate GPU process marking it as available.
+ QuerySync* sync = GetSync(query);
+ sync->process_count = query->submit_count();
+ sync->result = kResult;
+
+ // Check FreeCompletedQueries.
+ query_tracker_->FreeCompletedQueries();
+ EXPECT_EQ(0u, bucket->used_query_count);
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/ref_counted.h b/gpu/command_buffer/client/ref_counted.h
new file mode 100644
index 0000000..79ee8bc
--- /dev/null
+++ b/gpu/command_buffer/client/ref_counted.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_REF_COUNTED_H_
+#define GPU_COMMAND_BUFFER_CLIENT_REF_COUNTED_H_
+
+#include "base/memory/ref_counted.h"
+namespace gpu {
+using base::RefCountedThreadSafe;
+}
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_REF_COUNTED_H_
diff --git a/gpu/command_buffer/client/ring_buffer.cc b/gpu/command_buffer/client/ring_buffer.cc
new file mode 100644
index 0000000..813bb34
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the RingBuffer class.
+
+#include "gpu/command_buffer/client/ring_buffer.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+RingBuffer::RingBuffer(unsigned int alignment, Offset base_offset,
+ unsigned int size, CommandBufferHelper* helper,
+ void* base)
+ : helper_(helper),
+ base_offset_(base_offset),
+ size_(size),
+ free_offset_(0),
+ in_use_offset_(0),
+ alignment_(alignment),
+ base_(static_cast<int8*>(base) - base_offset) {
+}
+
+RingBuffer::~RingBuffer() {
+ // Free blocks pending tokens.
+ while (!blocks_.empty()) {
+ FreeOldestBlock();
+ }
+}
+
+void RingBuffer::FreeOldestBlock() {
+ DCHECK(!blocks_.empty()) << "no free blocks";
+ Block& block = blocks_.front();
+ DCHECK(block.state != IN_USE)
+ << "attempt to allocate more than maximum memory";
+ if (block.state == FREE_PENDING_TOKEN) {
+ helper_->WaitForToken(block.token);
+ }
+ in_use_offset_ += block.size;
+ if (in_use_offset_ == size_) {
+ in_use_offset_ = 0;
+ }
+ // If they match then the entire buffer is free.
+ if (in_use_offset_ == free_offset_) {
+ in_use_offset_ = 0;
+ free_offset_ = 0;
+ }
+ blocks_.pop_front();
+}
+
+void* RingBuffer::Alloc(unsigned int size) {
+ DCHECK_LE(size, size_) << "attempt to allocate more than maximum memory";
+ DCHECK(blocks_.empty() || blocks_.back().state != IN_USE)
+ << "Attempt to alloc another block before freeing the previous.";
+ // Similarly to malloc, an allocation of 0 allocates at least 1 byte, to
+ // return different pointers every time.
+ if (size == 0) size = 1;
+ // Allocate rounded to alignment size so that the offsets are always
+ // memory-aligned.
+ size = RoundToAlignment(size);
+
+ // Wait until there is enough room.
+ while (size > GetLargestFreeSizeNoWaiting()) {
+ FreeOldestBlock();
+ }
+
+ if (size + free_offset_ > size_) {
+ // Add padding to fill space before wrapping around
+ blocks_.push_back(Block(free_offset_, size_ - free_offset_, PADDING));
+ free_offset_ = 0;
+ }
+
+ Offset offset = free_offset_;
+ blocks_.push_back(Block(offset, size, IN_USE));
+ free_offset_ += size;
+ if (free_offset_ == size_) {
+ free_offset_ = 0;
+ }
+ return GetPointer(offset + base_offset_);
+}
+
+void RingBuffer::FreePendingToken(void* pointer,
+ unsigned int token) {
+ Offset offset = GetOffset(pointer);
+ offset -= base_offset_;
+ DCHECK(!blocks_.empty()) << "no allocations to free";
+ for (Container::reverse_iterator it = blocks_.rbegin();
+ it != blocks_.rend();
+ ++it) {
+ Block& block = *it;
+ if (block.offset == offset) {
+ DCHECK(block.state == IN_USE)
+ << "block that corresponds to offset already freed";
+ block.token = token;
+ block.state = FREE_PENDING_TOKEN;
+ return;
+ }
+ }
+  NOTREACHED() << "attempt to free non-existent block";
+}
+
+unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
+ unsigned int last_token_read = helper_->last_token_read();
+ while (!blocks_.empty()) {
+ Block& block = blocks_.front();
+ if (block.token > last_token_read || block.state == IN_USE) break;
+ FreeOldestBlock();
+ }
+ if (free_offset_ == in_use_offset_) {
+ if (blocks_.empty()) {
+ // The entire buffer is free.
+ DCHECK_EQ(free_offset_, 0u);
+ return size_;
+ } else {
+ // The entire buffer is in use.
+ return 0;
+ }
+ } else if (free_offset_ > in_use_offset_) {
+ // It's free from free_offset_ to size_ and from 0 to in_use_offset_
+ return std::max(size_ - free_offset_, in_use_offset_);
+ } else {
+ // It's free from free_offset_ -> in_use_offset_;
+ return in_use_offset_ - free_offset_;
+ }
+}
+
+} // namespace gpu
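Editor's note: GetLargestFreeSizeNoWaiting() above distinguishes the possible layouts of free_offset_ relative to in_use_offset_. A short worked example for a hypothetical 1024-byte ring (values are illustrative):

    // 1) free_offset_ == in_use_offset_, no blocks outstanding:
    //      the whole buffer is free                    -> returns 1024
    // 2) free_offset_ == in_use_offset_, blocks outstanding:
    //      the whole buffer is occupied                -> returns 0
    // 3) free_offset_ = 768, in_use_offset_ = 256:
    //      free spans are [768, 1024) and [0, 256)     -> max(256, 256) = 256
    // 4) free_offset_ = 256, in_use_offset_ = 768:
    //      the single free span is [256, 768)          -> 768 - 256 = 512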
diff --git a/gpu/command_buffer/client/ring_buffer.h b/gpu/command_buffer/client/ring_buffer.h
new file mode 100644
index 0000000..dfe16f7
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the RingBuffer class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
+
+#include <deque>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+class CommandBufferHelper;
+
+// RingBuffer manages a piece of memory as a ring buffer. Memory is allocated
+// with Alloc and then a is freed pending a token with FreePendingToken. Old
+// allocations must not be kept past new allocations.
+class GPU_EXPORT RingBuffer {
+ public:
+ typedef unsigned int Offset;
+
+ // Creates a RingBuffer.
+ // Parameters:
+ // alignment: Alignment for allocations.
+ // base_offset: The offset of the start of the buffer.
+ // size: The size of the buffer in bytes.
+ // helper: A CommandBufferHelper for dealing with tokens.
+ // base: The physical address that corresponds to base_offset.
+ RingBuffer(unsigned int alignment, Offset base_offset,
+ unsigned int size, CommandBufferHelper* helper, void* base);
+
+ ~RingBuffer();
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block.
+ void* Alloc(unsigned int size);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void* pointer, unsigned int token);
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSizeNoWaiting();
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait. Allocating a block of this size will succeed, but may
+ // block.
+ unsigned int GetLargestFreeOrPendingSize() {
+ return size_;
+ }
+
+ // Gets a pointer to a memory block given the base memory and the offset.
+ void* GetPointer(RingBuffer::Offset offset) const {
+ return static_cast<int8*>(base_) + offset;
+ }
+
+ // Gets the offset to a memory block given the base memory and the address.
+ RingBuffer::Offset GetOffset(void* pointer) const {
+ return static_cast<int8*>(pointer) - static_cast<int8*>(base_);
+ }
+
+ // Rounds the given size to the alignment in use.
+ unsigned int RoundToAlignment(unsigned int size) {
+ return (size + alignment_ - 1) & ~(alignment_ - 1);
+ }
+
+
+ private:
+ enum State {
+ IN_USE,
+ PADDING,
+ FREE_PENDING_TOKEN
+ };
+  // Book-keeping structure that describes a block of memory.
+ struct Block {
+ Block(Offset _offset, unsigned int _size, State _state)
+ : offset(_offset),
+ size(_size),
+ token(0),
+ state(_state) {
+ }
+ Offset offset;
+ unsigned int size;
+ unsigned int token; // token to wait for.
+ State state;
+ };
+
+ typedef std::deque<Block> Container;
+ typedef unsigned int BlockIndex;
+
+ void FreeOldestBlock();
+
+ CommandBufferHelper* helper_;
+
+ // Used blocks are added to the end, blocks are freed from the beginning.
+ Container blocks_;
+
+ // The base offset of the ring buffer.
+ Offset base_offset_;
+
+ // The size of the ring buffer.
+ Offset size_;
+
+ // Offset of first free byte.
+ Offset free_offset_;
+
+ // Offset of first used byte.
+ // Range between in_use_mark and free_mark is in use.
+ Offset in_use_offset_;
+
+ // Alignment for allocations.
+ unsigned int alignment_;
+
+ // The physical address that corresponds to base_offset.
+ void* base_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RingBuffer);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
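Editor's note: because blocks are reclaimed strictly in allocation order, callers follow a simple allocate, use, FreePendingToken pattern, and the ring only waits when it must reclaim the oldest pending block. A minimal usage sketch, assuming `helper`, `base`, `kBaseOffset`, and `kSize` describe an already-mapped shared memory region:

    // Sketch only: the mapped region and CommandBufferHelper are assumed to
    // have been set up elsewhere.
    gpu::RingBuffer ring(4 /* alignment */, kBaseOffset, kSize, helper, base);

    void* ptr = ring.Alloc(256);  // May wait on older pending blocks.
    // ...fill the 256 bytes at ptr and issue the command that reads them...
    ring.FreePendingToken(ptr, helper->InsertToken());
    // The block becomes reusable once that token passes through the stream.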
diff --git a/gpu/command_buffer/client/ring_buffer_test.cc b/gpu/command_buffer/client/ring_buffer_test.cc
new file mode 100644
index 0000000..b3aca13
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer_test.cc
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the tests for the RingBuffer class.
+
+#include "gpu/command_buffer/client/ring_buffer.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/transfer_buffer_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class BaseRingBufferTest : public testing::Test {
+ protected:
+ static const unsigned int kBaseOffset = 128;
+ static const unsigned int kBufferSize = 1024;
+ static const unsigned int kAlignment = 4;
+
+ void RunPendingSetToken() {
+ for (std::vector<const void*>::iterator it = set_token_arguments_.begin();
+ it != set_token_arguments_.end();
+ ++it) {
+ api_mock_->SetToken(cmd::kSetToken, 1, *it);
+ }
+ set_token_arguments_.clear();
+ delay_set_token_ = false;
+ }
+
+ void SetToken(unsigned int command,
+ unsigned int arg_count,
+ const void* _args) {
+ EXPECT_EQ(static_cast<unsigned int>(cmd::kSetToken), command);
+ EXPECT_EQ(1u, arg_count);
+ if (delay_set_token_)
+ set_token_arguments_.push_back(_args);
+ else
+ api_mock_->SetToken(cmd::kSetToken, 1, _args);
+ }
+
+ virtual void SetUp() {
+ delay_set_token_ = false;
+ api_mock_.reset(new AsyncAPIMock(true));
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(this, &BaseRingBufferTest::SetToken),
+ Return(error::kNoError)));
+
+ {
+ TransferBufferManager* manager = new TransferBufferManager();
+ transfer_buffer_manager_.reset(manager);
+ EXPECT_TRUE(manager->Initialize());
+ }
+ command_buffer_.reset(
+ new CommandBufferService(transfer_buffer_manager_.get()));
+ EXPECT_TRUE(command_buffer_->Initialize());
+
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), api_mock_.get(), NULL));
+ command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+ &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
+ command_buffer_->SetGetBufferChangeCallback(base::Bind(
+ &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
+
+ api_mock_->set_engine(gpu_scheduler_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize(kBufferSize);
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetLastState().token;
+ }
+
+#if defined(OS_MACOSX)
+ base::mac::ScopedNSAutoreleasePool autorelease_pool_;
+#endif
+ base::MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GpuScheduler> gpu_scheduler_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ std::vector<const void*> set_token_arguments_;
+ bool delay_set_token_;
+
+ scoped_ptr<int8[]> buffer_;
+ int8* buffer_start_;
+};
+
+#ifndef _MSC_VER
+const unsigned int BaseRingBufferTest::kBaseOffset;
+const unsigned int BaseRingBufferTest::kBufferSize;
+#endif
+
+// Test fixture for RingBuffer test - Creates a RingBuffer, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken are properly forwarded to the engine.
+class RingBufferTest : public BaseRingBufferTest {
+ protected:
+ virtual void SetUp() {
+ BaseRingBufferTest::SetUp();
+
+ buffer_.reset(new int8[kBufferSize + kBaseOffset]);
+ buffer_start_ = buffer_.get() + kBaseOffset;
+ allocator_.reset(new RingBuffer(kAlignment, kBaseOffset, kBufferSize,
+ helper_.get(), buffer_start_));
+ }
+
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ BaseRingBufferTest::TearDown();
+ }
+
+ scoped_ptr<RingBuffer> allocator_;
+};
+
+// Checks basic alloc and free.
+TEST_F(RingBufferTest, TestBasic) {
+ const unsigned int kSize = 16;
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
+ void* pointer = allocator_->Alloc(kSize);
+ EXPECT_GE(kBufferSize, allocator_->GetOffset(pointer) - kBaseOffset + kSize);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSizeNoWaiting());
+ int32 token = helper_->InsertToken();
+ allocator_->FreePendingToken(pointer, token);
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(RingBufferTest, TestFreePendingToken) {
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ delay_set_token_ = true;
+ // Allocate several buffers to fill in the memory.
+ int32 tokens[kAllocCount];
+ for (unsigned int ii = 0; ii < kAllocCount; ++ii) {
+ void* pointer = allocator_->Alloc(kSize);
+ EXPECT_GE(kBufferSize,
+ allocator_->GetOffset(pointer) - kBaseOffset + kSize);
+ tokens[ii] = helper_->InsertToken();
+ allocator_->FreePendingToken(pointer, tokens[ii]);
+ }
+
+ EXPECT_EQ(kBufferSize - (kSize * kAllocCount),
+ allocator_->GetLargestFreeSizeNoWaiting());
+
+ RunPendingSetToken();
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until a token is passed.
+ void* pointer1 = allocator_->Alloc(kSize);
+ EXPECT_EQ(kBaseOffset, allocator_->GetOffset(pointer1));
+
+ // Check that the token has indeed passed.
+ EXPECT_LE(tokens[0], GetToken());
+
+ allocator_->FreePendingToken(pointer1, helper_->InsertToken());
+}
+
+// Tests GetLargestFreeSizeNoWaiting
+TEST_F(RingBufferTest, TestGetLargestFreeSizeNoWaiting) {
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
+
+ void* pointer = allocator_->Alloc(kBufferSize);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_->InsertToken());
+}
+
+TEST_F(RingBufferTest, TestFreeBug) {
+ // The first and second allocations must not match.
+ const unsigned int kAlloc1 = 3*kAlignment;
+ const unsigned int kAlloc2 = 20;
+ void* pointer = allocator_->Alloc(kAlloc1);
+ EXPECT_EQ(kBufferSize - kAlloc1, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+ pointer = allocator_->Alloc(kAlloc2);
+ EXPECT_EQ(kBufferSize - kAlloc1 - kAlloc2,
+ allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+ pointer = allocator_->Alloc(kBufferSize);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/share_group.cc b/gpu/command_buffer/client/share_group.cc
new file mode 100644
index 0000000..96ab8d1
--- /dev/null
+++ b/gpu/command_buffer/client/share_group.cc
@@ -0,0 +1,259 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stack>
+#include <vector>
+
+#include "gpu/command_buffer/client/share_group.h"
+
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+#include "gpu/command_buffer/client/program_info_manager.h"
+#include "gpu/command_buffer/common/id_allocator.h"
+
+namespace gpu {
+namespace gles2 {
+
+ShareGroupContextData::IdHandlerData::IdHandlerData() : flush_generation_(0) {}
+ShareGroupContextData::IdHandlerData::~IdHandlerData() {}
+
+COMPILE_ASSERT(gpu::kInvalidResource == 0,
+ INVALID_RESOURCE_NOT_0_AS_GL_EXPECTS);
+
+// The standard id handler.
+class IdHandler : public IdHandlerInterface {
+ public:
+ IdHandler() { }
+ virtual ~IdHandler() { }
+
+ // Overridden from IdHandlerInterface.
+ virtual void MakeIds(
+ GLES2Implementation* /* gl_impl */,
+ GLuint id_offset, GLsizei n, GLuint* ids) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+ if (id_offset == 0) {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ ids[ii] = id_allocator_.AllocateID();
+ }
+ } else {
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ ids[ii] = id_allocator_.AllocateIDAtOrAbove(id_offset);
+ id_offset = ids[ii] + 1;
+ }
+ }
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool FreeIds(
+ GLES2Implementation* gl_impl,
+ GLsizei n, const GLuint* ids, DeleteFn delete_fn) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ id_allocator_.FreeID(ids[ii]);
+ }
+
+ (gl_impl->*delete_fn)(n, ids);
+ // We need to ensure that the delete call is evaluated on the service side
+ // before any other contexts issue commands using these client ids.
+ // TODO(vmiura): Can remove this by virtualizing internal ids, however
+ // this code only affects PPAPI for now.
+ gl_impl->helper()->CommandBufferHelper::Flush();
+ return true;
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool MarkAsUsedForBind(GLuint id) OVERRIDE {
+ if (id == 0)
+ return true;
+ base::AutoLock auto_lock(lock_);
+ return id_allocator_.MarkAsUsed(id);
+ }
+
+ virtual void FreeContext(GLES2Implementation* gl_impl) OVERRIDE {}
+
+ private:
+ base::Lock lock_;
+ IdAllocator id_allocator_;
+};
+
+// An id handler that requires Gen before Bind.
+class StrictIdHandler : public IdHandlerInterface {
+ public:
+ explicit StrictIdHandler(int id_namespace) : id_namespace_(id_namespace) {}
+ virtual ~StrictIdHandler() {}
+
+ // Overridden from IdHandler.
+ virtual void MakeIds(GLES2Implementation* gl_impl,
+ GLuint /* id_offset */,
+ GLsizei n,
+ GLuint* ids) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+
+ // Collect pending FreeIds from other flush_generation.
+ CollectPendingFreeIds(gl_impl);
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ if (!free_ids_.empty()) {
+ // Allocate a previously freed Id.
+ ids[ii] = free_ids_.top();
+ free_ids_.pop();
+
+ // Record kIdInUse state.
+ DCHECK(id_states_[ids[ii] - 1] == kIdFree);
+ id_states_[ids[ii] - 1] = kIdInUse;
+ } else {
+ // Allocate a new Id.
+ id_states_.push_back(kIdInUse);
+ ids[ii] = id_states_.size();
+ }
+ }
+ }
+
+ // Overridden from IdHandler.
+ virtual bool FreeIds(GLES2Implementation* gl_impl,
+ GLsizei n,
+ const GLuint* ids,
+ DeleteFn delete_fn) OVERRIDE {
+
+ // Delete stub must run before CollectPendingFreeIds.
+ (gl_impl->*delete_fn)(n, ids);
+
+ {
+ base::AutoLock auto_lock(lock_);
+
+ // Collect pending FreeIds from other flush_generation.
+ CollectPendingFreeIds(gl_impl);
+
+ // Save Ids to free in a later flush_generation.
+ ShareGroupContextData::IdHandlerData* ctxt_data =
+ gl_impl->share_group_context_data()->id_handler_data(id_namespace_);
+
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ GLuint id = ids[ii];
+ if (id != 0) {
+ // Save freed Id for later.
+ DCHECK(id_states_[id - 1] == kIdInUse);
+ id_states_[id - 1] = kIdPendingFree;
+ ctxt_data->freed_ids_.push_back(id);
+ }
+ }
+ }
+
+ return true;
+ }
+
+ // Overridden from IdHandler.
+ virtual bool MarkAsUsedForBind(GLuint id) OVERRIDE {
+#ifndef NDEBUG
+ if (id != 0) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(id_states_[id - 1] == kIdInUse);
+ }
+#endif
+ return true;
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual void FreeContext(GLES2Implementation* gl_impl) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+ CollectPendingFreeIds(gl_impl);
+ }
+
+ private:
+ enum IdState { kIdFree, kIdPendingFree, kIdInUse };
+
+ void CollectPendingFreeIds(GLES2Implementation* gl_impl) {
+ uint32 flush_generation = gl_impl->helper()->flush_generation();
+ ShareGroupContextData::IdHandlerData* ctxt_data =
+ gl_impl->share_group_context_data()->id_handler_data(id_namespace_);
+
+ if (ctxt_data->flush_generation_ != flush_generation) {
+ ctxt_data->flush_generation_ = flush_generation;
+ for (uint32 ii = 0; ii < ctxt_data->freed_ids_.size(); ++ii) {
+ const GLuint id = ctxt_data->freed_ids_[ii];
+ DCHECK(id_states_[id - 1] == kIdPendingFree);
+ id_states_[id - 1] = kIdFree;
+ free_ids_.push(id);
+ }
+ ctxt_data->freed_ids_.clear();
+ }
+ }
+
+ int id_namespace_;
+
+ base::Lock lock_;
+ std::vector<uint8> id_states_;
+ std::stack<uint32> free_ids_;
+};
+
+// An id handler for ids that are never reused.
+class NonReusedIdHandler : public IdHandlerInterface {
+ public:
+ NonReusedIdHandler() : last_id_(0) {}
+ virtual ~NonReusedIdHandler() {}
+
+ // Overridden from IdHandlerInterface.
+ virtual void MakeIds(
+ GLES2Implementation* /* gl_impl */,
+ GLuint id_offset, GLsizei n, GLuint* ids) OVERRIDE {
+ base::AutoLock auto_lock(lock_);
+ for (GLsizei ii = 0; ii < n; ++ii) {
+ ids[ii] = ++last_id_ + id_offset;
+ }
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool FreeIds(
+ GLES2Implementation* gl_impl,
+ GLsizei n, const GLuint* ids, DeleteFn delete_fn) OVERRIDE {
+ // Ids are never freed.
+ (gl_impl->*delete_fn)(n, ids);
+ return true;
+ }
+
+ // Overridden from IdHandlerInterface.
+ virtual bool MarkAsUsedForBind(GLuint /* id */) OVERRIDE {
+ // This is only used for Shaders and Programs which have no bind.
+ return false;
+ }
+
+ virtual void FreeContext(GLES2Implementation* gl_impl) OVERRIDE {}
+
+ private:
+ base::Lock lock_;
+ GLuint last_id_;
+};
+
+ShareGroup::ShareGroup(bool bind_generates_resource)
+ : bind_generates_resource_(bind_generates_resource) {
+ if (bind_generates_resource) {
+ for (int i = 0; i < id_namespaces::kNumIdNamespaces; ++i) {
+ if (i == id_namespaces::kProgramsAndShaders) {
+ id_handlers_[i].reset(new NonReusedIdHandler());
+ } else {
+ id_handlers_[i].reset(new IdHandler());
+ }
+ }
+ } else {
+ for (int i = 0; i < id_namespaces::kNumIdNamespaces; ++i) {
+ if (i == id_namespaces::kProgramsAndShaders) {
+ id_handlers_[i].reset(new NonReusedIdHandler());
+ } else {
+ id_handlers_[i].reset(new StrictIdHandler(i));
+ }
+ }
+ }
+ program_info_manager_.reset(ProgramInfoManager::Create(false));
+}
+
+void ShareGroup::set_program_info_manager(ProgramInfoManager* manager) {
+ program_info_manager_.reset(manager);
+}
+
+ShareGroup::~ShareGroup() {}
+
+} // namespace gles2
+} // namespace gpu
diff --git a/gpu/command_buffer/client/share_group.h b/gpu/command_buffer/client/share_group.h
new file mode 100644
index 0000000..c66704b
--- /dev/null
+++ b/gpu/command_buffer/client/share_group.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_SHARE_GROUP_H_
+#define GPU_COMMAND_BUFFER_CLIENT_SHARE_GROUP_H_
+
+#include <GLES2/gl2.h>
+#include "base/memory/scoped_ptr.h"
+#include "gles2_impl_export.h"
+#include "gpu/command_buffer/client/ref_counted.h"
+#include "gpu/command_buffer/common/gles2_cmd_format.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Implementation;
+class GLES2ImplementationTest;
+class ProgramInfoManager;
+
+typedef void (GLES2Implementation::*DeleteFn)(GLsizei n, const GLuint* ids);
+
+class ShareGroupContextData {
+ public:
+ struct IdHandlerData {
+ IdHandlerData();
+ ~IdHandlerData();
+
+ std::vector<GLuint> freed_ids_;
+ uint32 flush_generation_;
+ };
+
+ IdHandlerData* id_handler_data(int namespace_id) {
+ return &id_handler_data_[namespace_id];
+ }
+
+ private:
+ IdHandlerData id_handler_data_[id_namespaces::kNumIdNamespaces];
+};
+
+// Base class for IdHandlers
+class IdHandlerInterface {
+ public:
+ IdHandlerInterface() { }
+ virtual ~IdHandlerInterface() { }
+
+ // Makes some ids at or above id_offset.
+ virtual void MakeIds(
+ GLES2Implementation* gl_impl,
+ GLuint id_offset, GLsizei n, GLuint* ids) = 0;
+
+ // Frees some ids.
+ virtual bool FreeIds(
+ GLES2Implementation* gl_impl, GLsizei n, const GLuint* ids,
+ DeleteFn delete_fn) = 0;
+
+ // Marks an id as used for glBind functions. id = 0 does nothing.
+ virtual bool MarkAsUsedForBind(GLuint id) = 0;
+
+ // Called when a context in the share group is destructed.
+ virtual void FreeContext(GLES2Implementation* gl_impl) = 0;
+};
+
+// ShareGroup manages shared resources for contexts that are sharing resources.
+class GLES2_IMPL_EXPORT ShareGroup
+ : public gpu::RefCountedThreadSafe<ShareGroup> {
+ public:
+ ShareGroup(bool bind_generates_resource);
+
+ bool bind_generates_resource() const {
+ return bind_generates_resource_;
+ }
+
+ IdHandlerInterface* GetIdHandler(int namespace_id) const {
+ return id_handlers_[namespace_id].get();
+ }
+
+ ProgramInfoManager* program_info_manager() {
+ return program_info_manager_.get();
+ }
+
+ void FreeContext(GLES2Implementation* gl_impl) {
+ for (int i = 0; i < id_namespaces::kNumIdNamespaces; ++i) {
+ id_handlers_[i]->FreeContext(gl_impl);
+ }
+ }
+
+ private:
+ friend class gpu::RefCountedThreadSafe<ShareGroup>;
+ friend class gpu::gles2::GLES2ImplementationTest;
+ ~ShareGroup();
+
+  // Install a new program info manager. Used for testing only.
+ void set_program_info_manager(ProgramInfoManager* manager);
+
+ scoped_ptr<IdHandlerInterface> id_handlers_[id_namespaces::kNumIdNamespaces];
+ scoped_ptr<ProgramInfoManager> program_info_manager_;
+
+ bool bind_generates_resource_;
+
+ DISALLOW_COPY_AND_ASSIGN(ShareGroup);
+};
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_SHARE_GROUP_H_
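Editor's note: a ShareGroup hands out one IdHandlerInterface per id namespace. With bind_generates_resource the permissive IdHandler is used, otherwise StrictIdHandler enforces Gen-before-Bind, and programs/shaders always get the NonReusedIdHandler. A minimal sketch of how a context might use it, assuming a live GLES2Implementation gl_impl and the kTextures namespace constant from gles2_cmd_format.h; the surrounding code is illustrative:

    // Sketch only: gl_impl is assumed to be an initialized GLES2Implementation.
    scoped_refptr<gpu::gles2::ShareGroup> group(
        new gpu::gles2::ShareGroup(false /* bind_generates_resource */));

    // Allocate two texture ids from the shared namespace.
    GLuint ids[2] = {0, 0};
    group->GetIdHandler(gpu::gles2::id_namespaces::kTextures)
        ->MakeIds(gl_impl, 0 /* id_offset */, 2, ids);

    // ...Gen/Bind/use the textures on this context...

    // On context teardown, hand back any ids that are pending free.
    group->FreeContext(gl_impl);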
diff --git a/gpu/command_buffer/client/transfer_buffer.cc b/gpu/command_buffer/client/transfer_buffer.cc
new file mode 100644
index 0000000..da00d87
--- /dev/null
+++ b/gpu/command_buffer/client/transfer_buffer.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A class to manage a growing transfer buffer.
+
+#include "gpu/command_buffer/client/transfer_buffer.h"
+
+#include "base/bits.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+TransferBuffer::TransferBuffer(
+ CommandBufferHelper* helper)
+ : helper_(helper),
+ result_size_(0),
+ default_buffer_size_(0),
+ min_buffer_size_(0),
+ max_buffer_size_(0),
+ alignment_(0),
+ size_to_flush_(0),
+ bytes_since_last_flush_(0),
+ buffer_id_(-1),
+ result_buffer_(NULL),
+ result_shm_offset_(0),
+ usable_(true) {
+}
+
+TransferBuffer::~TransferBuffer() {
+ Free();
+}
+
+bool TransferBuffer::Initialize(
+ unsigned int default_buffer_size,
+ unsigned int result_size,
+ unsigned int min_buffer_size,
+ unsigned int max_buffer_size,
+ unsigned int alignment,
+ unsigned int size_to_flush) {
+ result_size_ = result_size;
+ default_buffer_size_ = default_buffer_size;
+ min_buffer_size_ = min_buffer_size;
+ max_buffer_size_ = max_buffer_size;
+ alignment_ = alignment;
+ size_to_flush_ = size_to_flush;
+ ReallocateRingBuffer(default_buffer_size_ - result_size);
+ return HaveBuffer();
+}
+
+void TransferBuffer::Free() {
+ if (HaveBuffer()) {
+ TRACE_EVENT0("gpu", "TransferBuffer::Free");
+ helper_->Finish();
+ helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
+ buffer_id_ = -1;
+ buffer_ = NULL;
+ result_buffer_ = NULL;
+ result_shm_offset_ = 0;
+ ring_buffer_.reset();
+ bytes_since_last_flush_ = 0;
+ }
+}
+
+bool TransferBuffer::HaveBuffer() const {
+ DCHECK(buffer_id_ == -1 || buffer_.get());
+ return buffer_id_ != -1;
+}
+
+RingBuffer::Offset TransferBuffer::GetOffset(void* pointer) const {
+ return ring_buffer_->GetOffset(pointer);
+}
+
+void TransferBuffer::FreePendingToken(void* p, unsigned int token) {
+ ring_buffer_->FreePendingToken(p, token);
+ if (bytes_since_last_flush_ >= size_to_flush_ && size_to_flush_ > 0) {
+ helper_->Flush();
+ bytes_since_last_flush_ = 0;
+ }
+}
+
+void TransferBuffer::AllocateRingBuffer(unsigned int size) {
+ for (;size >= min_buffer_size_; size /= 2) {
+ int32 id = -1;
+ scoped_refptr<gpu::Buffer> buffer =
+ helper_->command_buffer()->CreateTransferBuffer(size, &id);
+ if (id != -1) {
+ DCHECK(buffer.get());
+ buffer_ = buffer;
+ ring_buffer_.reset(new RingBuffer(
+ alignment_,
+ result_size_,
+ buffer_->size() - result_size_,
+ helper_,
+ static_cast<char*>(buffer_->memory()) + result_size_));
+ buffer_id_ = id;
+ result_buffer_ = buffer_->memory();
+ result_shm_offset_ = 0;
+ return;
+ }
+ // we failed so don't try larger than this.
+ max_buffer_size_ = size / 2;
+ }
+ usable_ = false;
+}
+
+static unsigned int ComputePOTSize(unsigned int dimension) {
+ return (dimension == 0) ? 0 : 1 << base::bits::Log2Ceiling(dimension);
+}
+
+void TransferBuffer::ReallocateRingBuffer(unsigned int size) {
+ // What size buffer would we ask for if we needed a new one?
+ unsigned int needed_buffer_size = ComputePOTSize(size + result_size_);
+ needed_buffer_size = std::max(needed_buffer_size, min_buffer_size_);
+ needed_buffer_size = std::max(needed_buffer_size, default_buffer_size_);
+ needed_buffer_size = std::min(needed_buffer_size, max_buffer_size_);
+
+ if (usable_ && (!HaveBuffer() || needed_buffer_size > buffer_->size())) {
+ if (HaveBuffer()) {
+ Free();
+ }
+ AllocateRingBuffer(needed_buffer_size);
+ }
+}
+
+void* TransferBuffer::AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) {
+ DCHECK(size_allocated);
+
+ ReallocateRingBuffer(size);
+
+ if (!HaveBuffer()) {
+ return NULL;
+ }
+
+ unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
+ *size_allocated = std::min(max_size, size);
+ bytes_since_last_flush_ += *size_allocated;
+ return ring_buffer_->Alloc(*size_allocated);
+}
+
+void* TransferBuffer::Alloc(unsigned int size) {
+ ReallocateRingBuffer(size);
+
+ if (!HaveBuffer()) {
+ return NULL;
+ }
+
+ unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
+ if (size > max_size) {
+ return NULL;
+ }
+
+ bytes_since_last_flush_ += size;
+ return ring_buffer_->Alloc(size);
+}
+
+void* TransferBuffer::GetResultBuffer() {
+ ReallocateRingBuffer(result_size_);
+ return result_buffer_;
+}
+
+int TransferBuffer::GetResultOffset() {
+ ReallocateRingBuffer(result_size_);
+ return result_shm_offset_;
+}
+
+int TransferBuffer::GetShmId() {
+ ReallocateRingBuffer(result_size_);
+ return buffer_id_;
+}
+
+unsigned int TransferBuffer::GetCurrentMaxAllocationWithoutRealloc() const {
+ return HaveBuffer() ? ring_buffer_->GetLargestFreeOrPendingSize() : 0;
+}
+
+unsigned int TransferBuffer::GetMaxAllocation() const {
+ return HaveBuffer() ? max_buffer_size_ - result_size_ : 0;
+}
+
+void ScopedTransferBufferPtr::Release() {
+ if (buffer_) {
+ transfer_buffer_->FreePendingToken(buffer_, helper_->InsertToken());
+ buffer_ = NULL;
+ size_ = 0;
+ }
+}
+
+void ScopedTransferBufferPtr::Reset(unsigned int new_size) {
+ Release();
+ // NOTE: we allocate buffers of size 0 so that HaveBuffer will be true, so
+ // that address will return a pointer just like malloc, and so that GetShmId
+ // will be valid. That has the side effect that we'll insert a token on free.
+  // We could add code to skip the token for a zero-size buffer, but it
+  // doesn't seem worth the complication.
+ buffer_ = transfer_buffer_->AllocUpTo(new_size, &size_);
+}
+
+} // namespace gpu
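Editor's note: ReallocateRingBuffer() rounds the requested size (plus the result area) up to a power of two and then clamps it between the configured minimum/default and maximum sizes. A short worked example with hypothetical configuration values:

    // Hypothetical configuration: default 64 KB, min 4 KB, max 1 MB,
    // result_size_ = 64.
    //
    //   request = 100000 bytes
    //   ComputePOTSize(100000 + 64)        -> 131072 (128 KB)
    //   max(131072, min_buffer_size_)      -> 131072
    //   max(131072, default_buffer_size_)  -> 131072
    //   min(131072, max_buffer_size_)      -> 131072, so a 128 KB buffer
    //                                         replaces the 64 KB one.
    //
    // A 40000-byte request rounds to 65536, which equals the 64 KB default,
    // so the existing buffer is kept and no reallocation happens.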
diff --git a/gpu/command_buffer/client/transfer_buffer.h b/gpu/command_buffer/client/transfer_buffer.h
new file mode 100644
index 0000000..348ad32
--- /dev/null
+++ b/gpu/command_buffer/client/transfer_buffer.h
@@ -0,0 +1,199 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "gpu/command_buffer/common/buffer.h"
+#include "gpu/command_buffer/common/gles2_cmd_utils.h"
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+// Interface for managing the transfer buffer.
+class GPU_EXPORT TransferBufferInterface {
+ public:
+ TransferBufferInterface() { }
+ virtual ~TransferBufferInterface() { }
+
+ virtual bool Initialize(
+ unsigned int buffer_size,
+ unsigned int result_size,
+ unsigned int min_buffer_size,
+ unsigned int max_buffer_size,
+ unsigned int alignment,
+ unsigned int size_to_flush) = 0;
+
+ virtual int GetShmId() = 0;
+ virtual void* GetResultBuffer() = 0;
+ virtual int GetResultOffset() = 0;
+
+ virtual void Free() = 0;
+
+ virtual bool HaveBuffer() const = 0;
+
+ // Allocates up to size bytes.
+ virtual void* AllocUpTo(unsigned int size, unsigned int* size_allocated) = 0;
+
+ // Allocates size bytes.
+ // Note: Alloc will fail if it can not return size bytes.
+ virtual void* Alloc(unsigned int size) = 0;
+
+ virtual RingBuffer::Offset GetOffset(void* pointer) const = 0;
+
+ virtual void FreePendingToken(void* p, unsigned int token) = 0;
+};
+
+// Class that manages the transfer buffer.
+class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
+ public:
+ TransferBuffer(CommandBufferHelper* helper);
+ virtual ~TransferBuffer();
+
+ // Overridden from TransferBufferInterface.
+ virtual bool Initialize(
+ unsigned int default_buffer_size,
+ unsigned int result_size,
+ unsigned int min_buffer_size,
+ unsigned int max_buffer_size,
+ unsigned int alignment,
+ unsigned int size_to_flush) OVERRIDE;
+ virtual int GetShmId() OVERRIDE;
+ virtual void* GetResultBuffer() OVERRIDE;
+ virtual int GetResultOffset() OVERRIDE;
+ virtual void Free() OVERRIDE;
+ virtual bool HaveBuffer() const OVERRIDE;
+ virtual void* AllocUpTo(
+ unsigned int size, unsigned int* size_allocated) OVERRIDE;
+ virtual void* Alloc(unsigned int size) OVERRIDE;
+ virtual RingBuffer::Offset GetOffset(void* pointer) const OVERRIDE;
+ virtual void FreePendingToken(void* p, unsigned int token) OVERRIDE;
+
+ // These are for testing.
+ unsigned int GetCurrentMaxAllocationWithoutRealloc() const;
+ unsigned int GetMaxAllocation() const;
+
+ private:
+ // Tries to reallocate the ring buffer if it's not large enough for size.
+ void ReallocateRingBuffer(unsigned int size);
+
+ void AllocateRingBuffer(unsigned int size);
+
+ CommandBufferHelper* helper_;
+ scoped_ptr<RingBuffer> ring_buffer_;
+
+ // size reserved for results
+ unsigned int result_size_;
+
+ // default size. Size we want when starting or re-allocating
+ unsigned int default_buffer_size_;
+
+ // min size we'll consider successful
+ unsigned int min_buffer_size_;
+
+  // max size we'll let the buffer grow to
+ unsigned int max_buffer_size_;
+
+ // alignment for allocations
+ unsigned int alignment_;
+
+ // Size at which to do an async flush. 0 = never.
+ unsigned int size_to_flush_;
+
+ // Number of bytes since we last flushed.
+ unsigned int bytes_since_last_flush_;
+
+ // the current buffer.
+ scoped_refptr<gpu::Buffer> buffer_;
+
+ // id of buffer. -1 = no buffer
+ int32 buffer_id_;
+
+ // address of result area
+ void* result_buffer_;
+
+ // offset to result area
+ uint32 result_shm_offset_;
+
+ // false if we failed to allocate min_buffer_size
+ bool usable_;
+};
+
+// A class that will manage the lifetime of a transfer buffer allocation.
+class GPU_EXPORT ScopedTransferBufferPtr {
+ public:
+ ScopedTransferBufferPtr(
+ unsigned int size,
+ CommandBufferHelper* helper,
+ TransferBufferInterface* transfer_buffer)
+ : buffer_(NULL),
+ size_(0),
+ helper_(helper),
+ transfer_buffer_(transfer_buffer) {
+ Reset(size);
+ }
+
+ ~ScopedTransferBufferPtr() {
+ Release();
+ }
+
+ bool valid() const {
+ return buffer_ != NULL;
+ }
+
+ unsigned int size() const {
+ return size_;
+ }
+
+ int shm_id() const {
+ return transfer_buffer_->GetShmId();
+ }
+
+ RingBuffer::Offset offset() const {
+ return transfer_buffer_->GetOffset(buffer_);
+ }
+
+ void* address() const {
+ return buffer_;
+ }
+
+ void Release();
+
+ void Reset(unsigned int new_size);
+
+ private:
+ void* buffer_;
+ unsigned int size_;
+ CommandBufferHelper* helper_;
+ TransferBufferInterface* transfer_buffer_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTransferBufferPtr);
+};
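+
+// Typical RAII use of ScopedTransferBufferPtr (a sketch; |data| and the
+// command that consumes the shm id/offset are assumptions):
+//
+//   ScopedTransferBufferPtr buffer(data_size, helper, transfer_buffer);
+//   if (buffer.valid()) {
+//     memcpy(buffer.address(), data, buffer.size());
+//     // Issue a command here that reads buffer.shm_id() / buffer.offset().
+//   }
+//   // The destructor calls Release(), which returns the space to the
+//   // transfer buffer.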
+
+template <typename T>
+class ScopedTransferBufferArray : public ScopedTransferBufferPtr {
+ public:
+ ScopedTransferBufferArray(
+ unsigned int num_elements,
+ CommandBufferHelper* helper, TransferBufferInterface* transfer_buffer)
+ : ScopedTransferBufferPtr(
+ num_elements * sizeof(T), helper, transfer_buffer) {
+ }
+
+ T* elements() {
+ return static_cast<T*>(address());
+ }
+
+ unsigned int num_elements() const {
+ return size() / sizeof(T);
+ }
+};
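+
+// The array variant just sizes the allocation by element count, e.g.
+// (hypothetical element type and count):
+//
+//   ScopedTransferBufferArray<uint32> values(count, helper, transfer_buffer);
+//   if (values.valid()) {
+//     for (unsigned int i = 0; i < values.num_elements(); ++i)
+//       values.elements()[i] = 0;
+//   }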
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
diff --git a/gpu/command_buffer/client/transfer_buffer_unittest.cc b/gpu/command_buffer/client/transfer_buffer_unittest.cc
new file mode 100644
index 0000000..cb8558f
--- /dev/null
+++ b/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -0,0 +1,485 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests for TransferBuffer.
+
+#include "gpu/command_buffer/client/transfer_buffer.h"
+
+#include "base/compiler_specific.h"
+#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/common/command_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+using ::testing::AtMost;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::StrictMock;
+
+namespace gpu {
+
+
+class TransferBufferTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const unsigned int kStartingOffset = 64;
+ static const unsigned int kAlignment = 4;
+ static const size_t kTransferBufferSize = 256;
+
+ TransferBufferTest()
+ : transfer_buffer_id_(0) {
+ }
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ virtual void Initialize(unsigned int size_to_flush) {
+ ASSERT_TRUE(transfer_buffer_->Initialize(
+ kTransferBufferSize,
+ kStartingOffset,
+ kTransferBufferSize,
+ kTransferBufferSize,
+ kAlignment,
+ size_to_flush));
+ }
+
+ MockClientCommandBufferMockFlush* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ scoped_ptr<MockClientCommandBufferMockFlush> command_buffer_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ int32 transfer_buffer_id_;
+};
+
+void TransferBufferTest::SetUp() {
+ command_buffer_.reset(new StrictMock<MockClientCommandBufferMockFlush>());
+ ASSERT_TRUE(command_buffer_->Initialize());
+
+ helper_.reset(new CommandBufferHelper(command_buffer()));
+ ASSERT_TRUE(helper_->Initialize(kCommandBufferSizeBytes));
+
+ transfer_buffer_id_ = command_buffer()->GetNextFreeTransferBufferId();
+
+ transfer_buffer_.reset(new TransferBuffer(helper_.get()));
+}
+
+void TransferBufferTest::TearDown() {
+ if (transfer_buffer_->HaveBuffer()) {
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(), OnFlush()).Times(AtMost(1));
+ transfer_buffer_.reset();
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const int32 TransferBufferTest::kNumCommandEntries;
+const int32 TransferBufferTest::kCommandBufferSizeBytes;
+const unsigned int TransferBufferTest::kStartingOffset;
+const unsigned int TransferBufferTest::kAlignment;
+const size_t TransferBufferTest::kTransferBufferSize;
+#endif
+
+TEST_F(TransferBufferTest, Basic) {
+ Initialize(0);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+ EXPECT_EQ(
+ kTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+}
+
+TEST_F(TransferBufferTest, Free) {
+ Initialize(0);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ EXPECT_TRUE(transfer_buffer_->GetResultBuffer() != NULL);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ unsigned int size = 0;
+ void* data = transfer_buffer_->AllocUpTo(1, &size);
+ EXPECT_TRUE(data != NULL);
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+ transfer_buffer_->FreePendingToken(data, 1);
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+ // See that it gets reallocated.
+ transfer_buffer_->GetResultOffset();
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ EXPECT_EQ(
+ kTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+
+ // Test freeing twice.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ transfer_buffer_->Free();
+}
+
+TEST_F(TransferBufferTest, TooLargeAllocation) {
+ Initialize(0);
+ // Check that we can't allocate larger than the max size.
+ void* ptr = transfer_buffer_->Alloc(kTransferBufferSize + 1);
+ EXPECT_TRUE(ptr == NULL);
+ // Check that if we try to allocate larger than the max we get the max.
+ unsigned int size_allocated = 0;
+ ptr = transfer_buffer_->AllocUpTo(
+ kTransferBufferSize + 1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kTransferBufferSize - kStartingOffset, size_allocated);
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferTest, MemoryAlignmentAfterZeroAllocation) {
+ Initialize(32u);
+ void* ptr = transfer_buffer_->Alloc(0);
+ EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
+ transfer_buffer_->FreePendingToken(ptr, static_cast<unsigned int>(-1));
+ // Check that the pointer is aligned on the following allocation.
+ ptr = transfer_buffer_->Alloc(4);
+ EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferTest, Flush) {
+ Initialize(16u);
+ unsigned int size_allocated = 0;
+ for (int i = 0; i < 8; ++i) {
+ void* ptr = transfer_buffer_->AllocUpTo(8u, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(8u, size_allocated);
+ if (i % 2) {
+ EXPECT_CALL(*command_buffer(), Flush(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
+ }
+ for (int i = 0; i < 8; ++i) {
+ void* ptr = transfer_buffer_->Alloc(8u);
+ ASSERT_TRUE(ptr != NULL);
+ if (i % 2) {
+ EXPECT_CALL(*command_buffer(), Flush(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
+ }
+}
+
+class MockClientCommandBufferCanFail : public MockClientCommandBufferMockFlush {
+ public:
+ MockClientCommandBufferCanFail() {
+ }
+ virtual ~MockClientCommandBufferCanFail() {
+ }
+
+ MOCK_METHOD2(CreateTransferBuffer,
+ scoped_refptr<Buffer>(size_t size, int32* id));
+
+ scoped_refptr<gpu::Buffer> RealCreateTransferBuffer(size_t size, int32* id) {
+ return MockCommandBufferBase::CreateTransferBuffer(size, id);
+ }
+};
+
+class TransferBufferExpandContractTest : public testing::Test {
+ protected:
+ static const int32 kNumCommandEntries = 400;
+ static const int32 kCommandBufferSizeBytes =
+ kNumCommandEntries * sizeof(CommandBufferEntry);
+ static const unsigned int kStartingOffset = 64;
+ static const unsigned int kAlignment = 4;
+ static const size_t kStartTransferBufferSize = 256;
+ static const size_t kMaxTransferBufferSize = 1024;
+ static const size_t kMinTransferBufferSize = 128;
+
+ TransferBufferExpandContractTest()
+ : transfer_buffer_id_(0) {
+ }
+
+ virtual void SetUp() OVERRIDE;
+ virtual void TearDown() OVERRIDE;
+
+ MockClientCommandBufferCanFail* command_buffer() const {
+ return command_buffer_.get();
+ }
+
+ scoped_ptr<MockClientCommandBufferCanFail> command_buffer_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ scoped_ptr<TransferBuffer> transfer_buffer_;
+ int32 transfer_buffer_id_;
+};
+
+void TransferBufferExpandContractTest::SetUp() {
+ command_buffer_.reset(new StrictMock<MockClientCommandBufferCanFail>());
+ ASSERT_TRUE(command_buffer_->Initialize());
+
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kCommandBufferSizeBytes, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ helper_.reset(new CommandBufferHelper(command_buffer()));
+ ASSERT_TRUE(helper_->Initialize(kCommandBufferSizeBytes));
+
+ transfer_buffer_id_ = command_buffer()->GetNextFreeTransferBufferId();
+
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ transfer_buffer_.reset(new TransferBuffer(helper_.get()));
+ ASSERT_TRUE(transfer_buffer_->Initialize(
+ kStartTransferBufferSize,
+ kStartingOffset,
+ kMinTransferBufferSize,
+ kMaxTransferBufferSize,
+ kAlignment,
+ 0));
+}
+
+void TransferBufferExpandContractTest::TearDown() {
+ if (transfer_buffer_->HaveBuffer()) {
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ }
+ // For command buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_.reset();
+}
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const int32 TransferBufferExpandContractTest::kNumCommandEntries;
+const int32 TransferBufferExpandContractTest::kCommandBufferSizeBytes;
+const unsigned int TransferBufferExpandContractTest::kStartingOffset;
+const unsigned int TransferBufferExpandContractTest::kAlignment;
+const size_t TransferBufferExpandContractTest::kStartTransferBufferSize;
+const size_t TransferBufferExpandContractTest::kMaxTransferBufferSize;
+const size_t TransferBufferExpandContractTest::kMinTransferBufferSize;
+#endif
+
+TEST_F(TransferBufferExpandContractTest, Expand) {
+ // Check it starts at starting size.
+ EXPECT_EQ(
+ kStartTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize * 2, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ // Try next power of 2.
+ const size_t kSize1 = 512 - kStartingOffset;
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize1, size_allocated);
+ EXPECT_EQ(kSize1, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kMaxTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ // Try next power of 2.
+ const size_t kSize2 = 1024 - kStartingOffset;
+ ptr = transfer_buffer_->AllocUpTo(kSize2, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+
+ // Try one more than the max. Should not go past the max.
+ size_allocated = 0;
+ const size_t kSize3 = kSize2 + 1;
+ ptr = transfer_buffer_->AllocUpTo(kSize3, &size_allocated);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferExpandContractTest, Contract) {
+ // Check it starts at starting size.
+ EXPECT_EQ(
+ kStartTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // Try to allocate again; fail the first request so it falls back to the
+ // minimum size.
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize, _))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kMinTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ const size_t kSize1 = 256 - kStartingOffset;
+ const size_t kSize2 = 128 - kStartingOffset;
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // Try to allocate again; this time only the minimum size is requested.
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kMinTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+
+ ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr != NULL);
+ EXPECT_EQ(kSize2, size_allocated);
+ EXPECT_EQ(kSize2, transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+ transfer_buffer_->FreePendingToken(ptr, 1);
+}
+
+TEST_F(TransferBufferExpandContractTest, OutOfMemory) {
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // Try to allocate again; fail every allocation request.
+ EXPECT_CALL(*command_buffer(), CreateTransferBuffer(_, _))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .WillOnce(
+ DoAll(SetArgPointee<1>(-1), Return(scoped_refptr<gpu::Buffer>())))
+ .RetiresOnSaturation();
+
+ const size_t kSize1 = 512 - kStartingOffset;
+ unsigned int size_allocated = 0;
+ void* ptr = transfer_buffer_->AllocUpTo(kSize1, &size_allocated);
+ ASSERT_TRUE(ptr == NULL);
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+}
+
+TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
+ // Free buffer.
+ EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
+ .Times(1)
+ .RetiresOnSaturation();
+ transfer_buffer_->Free();
+ // See it's freed.
+ EXPECT_FALSE(transfer_buffer_->HaveBuffer());
+
+ // See that it gets reallocated.
+ EXPECT_CALL(*command_buffer(),
+ CreateTransferBuffer(kStartTransferBufferSize, _))
+ .WillOnce(Invoke(
+ command_buffer(),
+ &MockClientCommandBufferCanFail::RealCreateTransferBuffer))
+ .RetiresOnSaturation();
+ EXPECT_EQ(transfer_buffer_id_, transfer_buffer_->GetShmId());
+ EXPECT_TRUE(transfer_buffer_->HaveBuffer());
+
+ // Check it's the default size.
+ EXPECT_EQ(
+ kStartTransferBufferSize - kStartingOffset,
+ transfer_buffer_->GetCurrentMaxAllocationWithoutRealloc());
+}
+
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/vertex_array_object_manager.cc b/gpu/command_buffer/client/vertex_array_object_manager.cc
new file mode 100644
index 0000000..3e98bd0
--- /dev/null
+++ b/gpu/command_buffer/client/vertex_array_object_manager.cc
@@ -0,0 +1,640 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/vertex_array_object_manager.h"
+
+#include "base/logging.h"
+#include "gpu/command_buffer/client/gles2_cmd_helper.h"
+#include "gpu/command_buffer/client/gles2_implementation.h"
+
+#if defined(__native_client__) && !defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+#define GLES2_SUPPORT_CLIENT_SIDE_ARRAYS
+#endif
+
+namespace gpu {
+namespace gles2 {
+
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
+static GLsizei RoundUpToMultipleOf4(GLsizei size) {
+ return (size + 3) & ~3;
+}
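+
+// For example, RoundUpToMultipleOf4(13) == 16 and RoundUpToMultipleOf4(16)
+// == 16.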
+
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+
+// A 32-bit and 64-bit compatible way of converting a pointer to a GLuint.
+static GLuint ToGLuint(const void* ptr) {
+ return static_cast<GLuint>(reinterpret_cast<size_t>(ptr));
+}
+
+// This class tracks VertexAttribPointers and helps emulate client side buffers.
+//
+// The way client side buffers work is we shadow all the Vertex Attribs so we
+// know which ones are pointing to client side buffers.
+//
+// At Draw time, for any attribs pointing to client side buffers we copy them
+// to a special VBO and reset the actual vertex attrib pointers to point to this
+// VBO.
+//
+// This also means we have to catch calls to query those values so that when
+// an attrib is a client side buffer we pass the info back the user expects.
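+//
+// For example (an illustrative GL call sequence, not code from this file),
+// an app that does
+//
+//   glBindBuffer(GL_ARRAY_BUFFER, 0);
+//   glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, client_memory);
+//   glEnableVertexAttribArray(0);
+//   glDrawArrays(GL_TRIANGLES, 0, 3);
+//
+// has attrib 0 backed by client memory, so at draw time that data is copied
+// into the hidden VBO and the real attrib pointer is redirected to it.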
+
+class GLES2_IMPL_EXPORT VertexArrayObject {
+ public:
+ // Info about Vertex Attributes. This is used to track what the user currently
+ // has bound on each Vertex Attribute so we can simulate client side buffers
+ // at glDrawXXX time.
+ class VertexAttrib {
+ public:
+ VertexAttrib()
+ : enabled_(false),
+ buffer_id_(0),
+ size_(4),
+ type_(GL_FLOAT),
+ normalized_(GL_FALSE),
+ pointer_(NULL),
+ gl_stride_(0),
+ divisor_(0) {
+ }
+
+ bool enabled() const {
+ return enabled_;
+ }
+
+ void set_enabled(bool enabled) {
+ enabled_ = enabled;
+ }
+
+ GLuint buffer_id() const {
+ return buffer_id_;
+ }
+
+ void set_buffer_id(GLuint id) {
+ buffer_id_ = id;
+ }
+
+ GLenum type() const {
+ return type_;
+ }
+
+ GLint size() const {
+ return size_;
+ }
+
+ GLsizei stride() const {
+ return gl_stride_;
+ }
+
+ GLboolean normalized() const {
+ return normalized_;
+ }
+
+ const GLvoid* pointer() const {
+ return pointer_;
+ }
+
+ bool IsClientSide() const {
+ return buffer_id_ == 0;
+ }
+
+ GLuint divisor() const {
+ return divisor_;
+ }
+
+ void SetInfo(
+ GLuint buffer_id,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei gl_stride,
+ const GLvoid* pointer) {
+ buffer_id_ = buffer_id;
+ size_ = size;
+ type_ = type;
+ normalized_ = normalized;
+ gl_stride_ = gl_stride;
+ pointer_ = pointer;
+ }
+
+ void SetDivisor(GLuint divisor) {
+ divisor_ = divisor;
+ }
+
+ private:
+ // Whether or not this attribute is enabled.
+ bool enabled_;
+
+ // The id of the buffer. 0 = client side buffer.
+ GLuint buffer_id_;
+
+ // Number of components (1, 2, 3, 4).
+ GLint size_;
+
+ // GL_BYTE, GL_FLOAT, etc. See glVertexAttribPointer.
+ GLenum type_;
+
+ // GL_TRUE or GL_FALSE
+ GLboolean normalized_;
+
+ // The pointer/offset into the buffer.
+ const GLvoid* pointer_;
+
+ // The stride that will be used to access the buffer. This is the bogus GL
+ // stride where 0 = compute the stride based on size and type.
+ GLsizei gl_stride_;
+
+ // Divisor, for geometry instancing.
+ GLuint divisor_;
+ };
+
+ typedef std::vector<VertexAttrib> VertexAttribs;
+
+ explicit VertexArrayObject(GLuint max_vertex_attribs);
+
+ void UnbindBuffer(GLuint id);
+
+ bool BindElementArray(GLuint id);
+
+ bool HaveEnabledClientSideBuffers() const;
+
+ void SetAttribEnable(GLuint index, bool enabled);
+
+ void SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride,
+ const void* ptr);
+
+ bool GetVertexAttrib(
+ GLuint index, GLenum pname, uint32* param) const;
+
+ void SetAttribDivisor(GLuint index, GLuint divisor);
+
+ bool GetAttribPointer(GLuint index, GLenum pname, void** ptr) const;
+
+ const VertexAttribs& vertex_attribs() const {
+ return vertex_attribs_;
+ }
+
+ GLuint bound_element_array_buffer() const {
+ return bound_element_array_buffer_id_;
+ }
+
+ private:
+ const VertexAttrib* GetAttrib(GLuint index) const;
+
+ int num_client_side_pointers_enabled_;
+
+ // The currently bound element array buffer.
+ GLuint bound_element_array_buffer_id_;
+
+ VertexAttribs vertex_attribs_;
+
+ DISALLOW_COPY_AND_ASSIGN(VertexArrayObject);
+};
+
+VertexArrayObject::VertexArrayObject(GLuint max_vertex_attribs)
+ : num_client_side_pointers_enabled_(0),
+ bound_element_array_buffer_id_(0) {
+ vertex_attribs_.resize(max_vertex_attribs);
+}
+
+void VertexArrayObject::UnbindBuffer(GLuint id) {
+ if (id == 0) {
+ return;
+ }
+ for (size_t ii = 0; ii < vertex_attribs_.size(); ++ii) {
+ VertexAttrib& attrib = vertex_attribs_[ii];
+ if (attrib.buffer_id() == id) {
+ attrib.set_buffer_id(0);
+ if (attrib.enabled()) {
+ ++num_client_side_pointers_enabled_;
+ }
+ }
+ }
+ if (bound_element_array_buffer_id_ == id) {
+ bound_element_array_buffer_id_ = 0;
+ }
+}
+
+bool VertexArrayObject::BindElementArray(GLuint id) {
+ if (id == bound_element_array_buffer_id_) {
+ return false;
+ }
+ bound_element_array_buffer_id_ = id;
+ return true;
+}
+
+bool VertexArrayObject::HaveEnabledClientSideBuffers() const {
+ return num_client_side_pointers_enabled_ > 0;
+}
+
+void VertexArrayObject::SetAttribEnable(GLuint index, bool enabled) {
+ if (index < vertex_attribs_.size()) {
+ VertexAttrib& attrib = vertex_attribs_[index];
+ if (attrib.enabled() != enabled) {
+ if (attrib.IsClientSide()) {
+ num_client_side_pointers_enabled_ += enabled ? 1 : -1;
+ DCHECK_GE(num_client_side_pointers_enabled_, 0);
+ }
+ attrib.set_enabled(enabled);
+ }
+ }
+}
+
+void VertexArrayObject::SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ if (index < vertex_attribs_.size()) {
+ VertexAttrib& attrib = vertex_attribs_[index];
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ --num_client_side_pointers_enabled_;
+ DCHECK_GE(num_client_side_pointers_enabled_, 0);
+ }
+
+ attrib.SetInfo(buffer_id, size, type, normalized, stride, ptr);
+
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ ++num_client_side_pointers_enabled_;
+ }
+ }
+}
+
+bool VertexArrayObject::GetVertexAttrib(
+ GLuint index, GLenum pname, uint32* param) const {
+ const VertexAttrib* attrib = GetAttrib(index);
+ if (!attrib) {
+ return false;
+ }
+
+ switch (pname) {
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
+ *param = attrib->buffer_id();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+ *param = attrib->enabled();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+ *param = attrib->size();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+ *param = attrib->stride();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+ *param = attrib->type();
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+ *param = attrib->normalized();
+ break;
+ default:
+ return false; // pass through to service side.
+ break;
+ }
+ return true;
+}
+
+void VertexArrayObject::SetAttribDivisor(GLuint index, GLuint divisor) {
+ if (index < vertex_attribs_.size()) {
+ VertexAttrib& attrib = vertex_attribs_[index];
+ attrib.SetDivisor(divisor);
+ }
+}
+
+// Gets the pointer for an attrib. Returns true if pname is
+// GL_VERTEX_ATTRIB_ARRAY_POINTER and the attrib index is in range.
+bool VertexArrayObject::GetAttribPointer(
+ GLuint index, GLenum pname, void** ptr) const {
+ const VertexAttrib* attrib = GetAttrib(index);
+ if (attrib && pname == GL_VERTEX_ATTRIB_ARRAY_POINTER) {
+ *ptr = const_cast<void*>(attrib->pointer());
+ return true;
+ }
+ return false;
+}
+
+// Gets an attrib if its index is in range.
+const VertexArrayObject::VertexAttrib* VertexArrayObject::GetAttrib(
+ GLuint index) const {
+ if (index < vertex_attribs_.size()) {
+ const VertexAttrib* attrib = &vertex_attribs_[index];
+ return attrib;
+ }
+ return NULL;
+}
+
+VertexArrayObjectManager::VertexArrayObjectManager(
+ GLuint max_vertex_attribs,
+ GLuint array_buffer_id,
+ GLuint element_array_buffer_id)
+ : max_vertex_attribs_(max_vertex_attribs),
+ array_buffer_id_(array_buffer_id),
+ array_buffer_size_(0),
+ array_buffer_offset_(0),
+ element_array_buffer_id_(element_array_buffer_id),
+ element_array_buffer_size_(0),
+ collection_buffer_size_(0),
+ default_vertex_array_object_(new VertexArrayObject(max_vertex_attribs)),
+ bound_vertex_array_object_(default_vertex_array_object_) {
+}
+
+VertexArrayObjectManager::~VertexArrayObjectManager() {
+ for (VertexArrayObjectMap::iterator it = vertex_array_objects_.begin();
+ it != vertex_array_objects_.end(); ++it) {
+ delete it->second;
+ }
+ delete default_vertex_array_object_;
+}
+
+bool VertexArrayObjectManager::IsReservedId(GLuint id) const {
+ return (id != 0 &&
+ (id == array_buffer_id_ || id == element_array_buffer_id_));
+}
+
+GLuint VertexArrayObjectManager::bound_element_array_buffer() const {
+ return bound_vertex_array_object_->bound_element_array_buffer();
+}
+
+void VertexArrayObjectManager::UnbindBuffer(GLuint id) {
+ bound_vertex_array_object_->UnbindBuffer(id);
+}
+
+bool VertexArrayObjectManager::BindElementArray(GLuint id) {
+ return bound_vertex_array_object_->BindElementArray(id);
+}
+
+void VertexArrayObjectManager::GenVertexArrays(
+ GLsizei n, const GLuint* arrays) {
+ DCHECK_GE(n, 0);
+ for (GLsizei i = 0; i < n; ++i) {
+ std::pair<VertexArrayObjectMap::iterator, bool> result =
+ vertex_array_objects_.insert(std::make_pair(
+ arrays[i], new VertexArrayObject(max_vertex_attribs_)));
+ DCHECK(result.second);
+ }
+}
+
+void VertexArrayObjectManager::DeleteVertexArrays(
+ GLsizei n, const GLuint* arrays) {
+ DCHECK_GE(n, 0);
+ for (GLsizei i = 0; i < n; ++i) {
+ GLuint id = arrays[i];
+ if (id) {
+ VertexArrayObjectMap::iterator it = vertex_array_objects_.find(id);
+ if (it != vertex_array_objects_.end()) {
+ if (bound_vertex_array_object_ == it->second) {
+ bound_vertex_array_object_ = default_vertex_array_object_;
+ }
+ delete it->second;
+ vertex_array_objects_.erase(it);
+ }
+ }
+ }
+}
+
+bool VertexArrayObjectManager::BindVertexArray(GLuint array, bool* changed) {
+ *changed = false;
+ VertexArrayObject* vertex_array_object = default_vertex_array_object_;
+ if (array != 0) {
+ VertexArrayObjectMap::iterator it = vertex_array_objects_.find(array);
+ if (it == vertex_array_objects_.end()) {
+ return false;
+ }
+ vertex_array_object = it->second;
+ }
+ *changed = vertex_array_object != bound_vertex_array_object_;
+ bound_vertex_array_object_ = vertex_array_object;
+ return true;
+}
+
+bool VertexArrayObjectManager::HaveEnabledClientSideBuffers() const {
+ return bound_vertex_array_object_->HaveEnabledClientSideBuffers();
+}
+
+void VertexArrayObjectManager::SetAttribEnable(GLuint index, bool enabled) {
+ bound_vertex_array_object_->SetAttribEnable(index, enabled);
+}
+
+bool VertexArrayObjectManager::GetVertexAttrib(
+ GLuint index, GLenum pname, uint32* param) {
+ return bound_vertex_array_object_->GetVertexAttrib(index, pname, param);
+}
+
+bool VertexArrayObjectManager::GetAttribPointer(
+ GLuint index, GLenum pname, void** ptr) const {
+ return bound_vertex_array_object_->GetAttribPointer(index, pname, ptr);
+}
+
+bool VertexArrayObjectManager::SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr) {
+ // Client side arrays are not allowed in vaos.
+ if (buffer_id == 0 && !IsDefaultVAOBound()) {
+ return false;
+ }
+ bound_vertex_array_object_->SetAttribPointer(
+ buffer_id, index, size, type, normalized, stride, ptr);
+ return true;
+}
+
+void VertexArrayObjectManager::SetAttribDivisor(GLuint index, GLuint divisor) {
+ bound_vertex_array_object_->SetAttribDivisor(index, divisor);
+}
+
+// Collects the data into the collection buffer and returns the number of
+// bytes collected.
+GLsizei VertexArrayObjectManager::CollectData(
+ const void* data,
+ GLsizei bytes_per_element,
+ GLsizei real_stride,
+ GLsizei num_elements) {
+ GLsizei bytes_needed = bytes_per_element * num_elements;
+ if (collection_buffer_size_ < bytes_needed) {
+ collection_buffer_.reset(new int8[bytes_needed]);
+ collection_buffer_size_ = bytes_needed;
+ }
+ const int8* src = static_cast<const int8*>(data);
+ int8* dst = collection_buffer_.get();
+ int8* end = dst + bytes_per_element * num_elements;
+ for (; dst < end; src += real_stride, dst += bytes_per_element) {
+ memcpy(dst, src, bytes_per_element);
+ }
+ return bytes_needed;
+}
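+
+// Worked example (values chosen for illustration): an interleaved client
+// array with real_stride == 20 holding 3-float positions (bytes_per_element
+// == 12) and num_elements == 4 is packed tightly, so CollectData copies four
+// 12-byte chunks and returns 48.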
+
+bool VertexArrayObjectManager::IsDefaultVAOBound() const {
+ return bound_vertex_array_object_ == default_vertex_array_object_;
+}
+
+// Returns true if buffers were set up.
+bool VertexArrayObjectManager::SetupSimulatedClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei num_elements,
+ GLsizei primcount,
+ bool* simulated) {
+ *simulated = false;
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ if (!bound_vertex_array_object_->HaveEnabledClientSideBuffers()) {
+ return true;
+ }
+ if (!IsDefaultVAOBound()) {
+ gl->SetGLError(
+ GL_INVALID_OPERATION, function_name,
+ "client side arrays not allowed with vertex array object");
+ return false;
+ }
+ *simulated = true;
+ GLsizei total_size = 0;
+ // Compute the size of the buffer we need.
+ const VertexArrayObject::VertexAttribs& vertex_attribs =
+ bound_vertex_array_object_->vertex_attribs();
+ for (GLuint ii = 0; ii < vertex_attribs.size(); ++ii) {
+ const VertexArrayObject::VertexAttrib& attrib = vertex_attribs[ii];
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ size_t bytes_per_element =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(attrib.type()) *
+ attrib.size();
+ GLsizei elements = (primcount && attrib.divisor() > 0) ?
+ ((primcount - 1) / attrib.divisor() + 1) : num_elements;
+ total_size += RoundUpToMultipleOf4(bytes_per_element * elements);
+ }
+ }
+ gl_helper->BindBuffer(GL_ARRAY_BUFFER, array_buffer_id_);
+ array_buffer_offset_ = 0;
+ if (total_size > array_buffer_size_) {
+ gl->BufferDataHelper(GL_ARRAY_BUFFER, total_size, NULL, GL_DYNAMIC_DRAW);
+ array_buffer_size_ = total_size;
+ }
+ for (GLuint ii = 0; ii < vertex_attribs.size(); ++ii) {
+ const VertexArrayObject::VertexAttrib& attrib = vertex_attribs[ii];
+ if (attrib.IsClientSide() && attrib.enabled()) {
+ size_t bytes_per_element =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(attrib.type()) *
+ attrib.size();
+ GLsizei real_stride = attrib.stride() ?
+ attrib.stride() : static_cast<GLsizei>(bytes_per_element);
+ GLsizei elements = (primcount && attrib.divisor() > 0) ?
+ ((primcount - 1) / attrib.divisor() + 1) : num_elements;
+ GLsizei bytes_collected = CollectData(
+ attrib.pointer(), bytes_per_element, real_stride, elements);
+ gl->BufferSubDataHelper(
+ GL_ARRAY_BUFFER, array_buffer_offset_, bytes_collected,
+ collection_buffer_.get());
+ gl_helper->VertexAttribPointer(
+ ii, attrib.size(), attrib.type(), attrib.normalized(), 0,
+ array_buffer_offset_);
+ array_buffer_offset_ += RoundUpToMultipleOf4(bytes_collected);
+ DCHECK_LE(array_buffer_offset_, array_buffer_size_);
+ }
+ }
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ return true;
+}
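+
+// Instancing note: with a non-zero divisor the per-attrib element count used
+// above is (primcount - 1) / divisor + 1; e.g. primcount == 10 with
+// divisor == 3 needs ceil(10 / 3) == 4 elements.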
+
+// Copies client side indices to the service and uses the highest index
+// accessed + 1 to set up any simulated client side buffers.
+bool VertexArrayObjectManager::SetupSimulatedIndexAndClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei count,
+ GLenum type,
+ GLsizei primcount,
+ const void* indices,
+ GLuint* offset,
+ bool* simulated) {
+ *simulated = false;
+ *offset = ToGLuint(indices);
+#if defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ GLsizei num_elements = 0;
+ if (bound_vertex_array_object_->bound_element_array_buffer() == 0) {
+ *simulated = true;
+ *offset = 0;
+ GLsizei max_index = -1;
+ switch (type) {
+ case GL_UNSIGNED_BYTE: {
+ const uint8* src = static_cast<const uint8*>(indices);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (src[ii] > max_index) {
+ max_index = src[ii];
+ }
+ }
+ break;
+ }
+ case GL_UNSIGNED_SHORT: {
+ const uint16* src = static_cast<const uint16*>(indices);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ if (src[ii] > max_index) {
+ max_index = src[ii];
+ }
+ }
+ break;
+ }
+ case GL_UNSIGNED_INT: {
+ uint32 max_glsizei = static_cast<uint32>(
+ std::numeric_limits<GLsizei>::max());
+ const uint32* src = static_cast<const uint32*>(indices);
+ for (GLsizei ii = 0; ii < count; ++ii) {
+ // Other parts of the API use GLsizei (signed) to store limits.
+ // As such, if we encounter an index that cannot be represented as
+ // a GLsizei we need to flag it as an error here.
+ if (src[ii] > max_glsizei) {
+ gl->SetGLError(
+ GL_INVALID_OPERATION, function_name, "index too large.");
+ return false;
+ }
+ GLsizei signed_index = static_cast<GLsizei>(src[ii]);
+ if (signed_index > max_index) {
+ max_index = signed_index;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ gl_helper->BindBuffer(GL_ELEMENT_ARRAY_BUFFER, element_array_buffer_id_);
+ GLsizei bytes_per_element =
+ GLES2Util::GetGLTypeSizeForTexturesAndBuffers(type);
+ GLsizei bytes_needed = bytes_per_element * count;
+ if (bytes_needed > element_array_buffer_size_) {
+ element_array_buffer_size_ = bytes_needed;
+ gl->BufferDataHelper(
+ GL_ELEMENT_ARRAY_BUFFER, bytes_needed, NULL, GL_DYNAMIC_DRAW);
+ }
+ gl->BufferSubDataHelper(
+ GL_ELEMENT_ARRAY_BUFFER, 0, bytes_needed, indices);
+
+ num_elements = max_index + 1;
+ } else if (bound_vertex_array_object_->HaveEnabledClientSideBuffers()) {
+ // Index buffer is GL buffer. Ask the service for the highest vertex
+ // that will be accessed. Note: It doesn't matter if another context
+ // changes the contents of any of the buffers. The service will still
+ // validate the indices. We just need to know how much to copy across.
+ num_elements = gl->GetMaxValueInBufferCHROMIUMHelper(
+ bound_vertex_array_object_->bound_element_array_buffer(),
+ count, type, ToGLuint(indices)) + 1;
+ }
+
+ bool simulated_client_side_buffers = false;
+ SetupSimulatedClientSideBuffers(
+ function_name, gl, gl_helper, num_elements, primcount,
+ &simulated_client_side_buffers);
+ *simulated = *simulated || simulated_client_side_buffers;
+#endif // defined(GLES2_SUPPORT_CLIENT_SIDE_ARRAYS)
+ return true;
+}
+
+} // namespace gles2
+} // namespace gpu
+
+
diff --git a/gpu/command_buffer/client/vertex_array_object_manager.h b/gpu/command_buffer/client/vertex_array_object_manager.h
new file mode 100644
index 0000000..34f630d
--- /dev/null
+++ b/gpu/command_buffer/client/vertex_array_object_manager.h
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_VERTEX_ARRAY_OBJECT_MANAGER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_VERTEX_ARRAY_OBJECT_MANAGER_H_
+
+#include <GLES2/gl2.h>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "gles2_impl_export.h"
+
+namespace gpu {
+namespace gles2 {
+
+class GLES2Implementation;
+class GLES2CmdHelper;
+class VertexArrayObject;
+
+// VertexArrayObjectManager manages vertex array objects on the client side
+// of the command buffer.
+class GLES2_IMPL_EXPORT VertexArrayObjectManager {
+ public:
+ VertexArrayObjectManager(
+ GLuint max_vertex_attribs,
+ GLuint array_buffer_id,
+ GLuint element_array_buffer_id);
+ ~VertexArrayObjectManager();
+
+ bool IsReservedId(GLuint id) const;
+
+ // Binds an element array.
+ // Returns true if service should be called.
+ bool BindElementArray(GLuint id);
+
+ // Unbind buffer.
+ void UnbindBuffer(GLuint id);
+
+ // Generates vertex array objects for the given ids.
+ void GenVertexArrays(GLsizei n, const GLuint* arrays);
+
+ // Deletes array objects for the given ids.
+ void DeleteVertexArrays(GLsizei n, const GLuint* arrays);
+
+ // Binds a vertex array.
+ // changed will be set to true if the service should be called.
+ // Returns false if array is an unknown id.
+ bool BindVertexArray(GLuint array, bool* changed);
+
+ // simulated will be set to true if buffers were simulated.
+ // Returns true if the service should be called.
+ bool SetupSimulatedClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei num_elements,
+ GLsizei primcount,
+ bool* simulated);
+
+ // Returns true if buffers were set up.
+ bool SetupSimulatedIndexAndClientSideBuffers(
+ const char* function_name,
+ GLES2Implementation* gl,
+ GLES2CmdHelper* gl_helper,
+ GLsizei count,
+ GLenum type,
+ GLsizei primcount,
+ const void* indices,
+ GLuint* offset,
+ bool* simulated);
+
+ bool HaveEnabledClientSideBuffers() const;
+
+ void SetAttribEnable(GLuint index, bool enabled);
+
+ bool GetVertexAttrib(GLuint index, GLenum pname, uint32* param);
+
+ bool GetAttribPointer(GLuint index, GLenum pname, void** ptr) const;
+
+ // Returns false if error.
+ bool SetAttribPointer(
+ GLuint buffer_id,
+ GLuint index,
+ GLint size,
+ GLenum type,
+ GLboolean normalized,
+ GLsizei stride,
+ const void* ptr);
+
+ void SetAttribDivisor(GLuint index, GLuint divisor);
+
+ GLuint bound_element_array_buffer() const;
+
+ private:
+ typedef base::hash_map<GLuint, VertexArrayObject*> VertexArrayObjectMap;
+
+ bool IsDefaultVAOBound() const;
+
+ GLsizei CollectData(const void* data,
+ GLsizei bytes_per_element,
+ GLsizei real_stride,
+ GLsizei num_elements);
+
+ GLuint max_vertex_attribs_;
+ GLuint array_buffer_id_;
+ GLsizei array_buffer_size_;
+ GLsizei array_buffer_offset_;
+ GLuint element_array_buffer_id_;
+ GLsizei element_array_buffer_size_;
+ GLsizei collection_buffer_size_;
+ scoped_ptr<int8[]> collection_buffer_;
+
+ VertexArrayObject* default_vertex_array_object_;
+ VertexArrayObject* bound_vertex_array_object_;
+ VertexArrayObjectMap vertex_array_objects_;
+
+ DISALLOW_COPY_AND_ASSIGN(VertexArrayObjectManager);
+};
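+
+// Rough usage sketch (the ids and attrib parameters below are illustrative,
+// not values used elsewhere in this code):
+//
+//   VertexArrayObjectManager manager(
+//       max_vertex_attribs, kArrayBufferId, kElementArrayBufferId);
+//   GLuint vao = 1;
+//   manager.GenVertexArrays(1, &vao);
+//   bool changed = false;
+//   if (manager.BindVertexArray(vao, &changed) && changed) {
+//     // Forward the bind to the service.
+//   }
+//   manager.SetAttribPointer(buffer_id, 0, 4, GL_FLOAT, GL_FALSE, 0, NULL);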
+
+} // namespace gles2
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_VERTEX_ARRAY_OBJECT_MANAGER_H_
+
diff --git a/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc b/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
new file mode 100644
index 0000000..b3ac065
--- /dev/null
+++ b/gpu/command_buffer/client/vertex_array_object_manager_unittest.cc
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/vertex_array_object_manager.h"
+
+#include <GLES2/gl2ext.h>
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+namespace gles2 {
+
+class VertexArrayObjectManagerTest : public testing::Test {
+ protected:
+ static const GLuint kMaxAttribs = 4;
+ static const GLuint kClientSideArrayBuffer = 0x1234;
+ static const GLuint kClientSideElementArrayBuffer = 0x1235;
+
+ virtual void SetUp() {
+ manager_.reset(new VertexArrayObjectManager(
+ kMaxAttribs,
+ kClientSideArrayBuffer,
+ kClientSideElementArrayBuffer));
+ }
+ virtual void TearDown() {
+ }
+
+ scoped_ptr<VertexArrayObjectManager> manager_;
+};
+
+// GCC requires these declarations, but MSVC requires they not be present
+#ifndef _MSC_VER
+const GLuint VertexArrayObjectManagerTest::kMaxAttribs;
+const GLuint VertexArrayObjectManagerTest::kClientSideArrayBuffer;
+const GLuint VertexArrayObjectManagerTest::kClientSideElementArrayBuffer;
+#endif
+
+TEST_F(VertexArrayObjectManagerTest, Basic) {
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ // Check out of bounds access.
+ uint32 param;
+ void* ptr;
+ EXPECT_FALSE(manager_->GetVertexAttrib(
+ kMaxAttribs, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &param));
+ EXPECT_FALSE(manager_->GetAttribPointer(
+ kMaxAttribs, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr));
+ // Check defaults.
+ for (GLuint ii = 0; ii < kMaxAttribs; ++ii) {
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ ii, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &param));
+ EXPECT_EQ(0u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ ii, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &param));
+ EXPECT_EQ(0u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ ii, GL_VERTEX_ATTRIB_ARRAY_SIZE, &param));
+ EXPECT_EQ(4u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ ii, GL_VERTEX_ATTRIB_ARRAY_TYPE, &param));
+ EXPECT_EQ(static_cast<uint32>(GL_FLOAT), param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ ii, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &param));
+ EXPECT_EQ(0u, param);
+ EXPECT_TRUE(manager_->GetAttribPointer(
+ ii, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr));
+ EXPECT_TRUE(NULL == ptr);
+ }
+}
+
+TEST_F(VertexArrayObjectManagerTest, UnbindBuffer) {
+ const GLuint kBufferToUnbind = 123;
+ const GLuint kBufferToRemain = 456;
+ const GLuint kElementArray = 789;
+ bool changed = false;
+ GLuint ids[2] = { 1, 3, };
+ manager_->GenVertexArrays(arraysize(ids), ids);
+ // Bind buffers to attribs on 2 vaos.
+ for (size_t ii = 0; ii < arraysize(ids); ++ii) {
+ EXPECT_TRUE(manager_->BindVertexArray(ids[ii], &changed));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToUnbind, 0, 4, GL_FLOAT, false, 0, 0));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToRemain, 1, 4, GL_FLOAT, false, 0, 0));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToUnbind, 2, 4, GL_FLOAT, false, 0, 0));
+ EXPECT_TRUE(manager_->SetAttribPointer(
+ kBufferToRemain, 3, 4, GL_FLOAT, false, 0, 0));
+ for (size_t jj = 0; jj < 4u; ++jj) {
+ manager_->SetAttribEnable(jj, true);
+ }
+ manager_->BindElementArray(kElementArray);
+ }
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ EXPECT_TRUE(manager_->BindVertexArray(ids[0], &changed));
+ // Unbind the buffer.
+ manager_->UnbindBuffer(kBufferToUnbind);
+ manager_->UnbindBuffer(kElementArray);
+ // The attribs are still enabled but their buffer is 0.
+ EXPECT_TRUE(manager_->HaveEnabledClientSideBuffers());
+ // Check the status of the bindings.
+ static const uint32 expected[][4] = {
+ { 0, kBufferToRemain, 0, kBufferToRemain, },
+ { kBufferToUnbind, kBufferToRemain, kBufferToUnbind, kBufferToRemain, },
+ };
+ static const GLuint expected_element_array[] = {
+ 0, kElementArray,
+ };
+ for (size_t ii = 0; ii < arraysize(ids); ++ii) {
+ EXPECT_TRUE(manager_->BindVertexArray(ids[ii], &changed));
+ for (size_t jj = 0; jj < 4; ++jj) {
+ uint32 param = 1;
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ jj, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &param));
+ EXPECT_EQ(expected[ii][jj], param)
+ << "id: " << ids[ii] << ", attrib: " << jj;
+ }
+ EXPECT_EQ(expected_element_array[ii],
+ manager_->bound_element_array_buffer());
+ }
+
+ // The vao that was not bound still has all service side buffers.
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+
+ // Make sure unbinding buffer 0 does not incorrectly affect the count.
+ EXPECT_TRUE(manager_->BindVertexArray(0, &changed));
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ manager_->SetAttribEnable(2, true);
+ manager_->UnbindBuffer(0);
+ manager_->SetAttribEnable(2, false);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+}
+
+TEST_F(VertexArrayObjectManagerTest, GetSet) {
+ const char* dummy = "dummy";
+ const void* p = reinterpret_cast<const void*>(dummy);
+ manager_->SetAttribEnable(1, true);
+ manager_->SetAttribPointer(123, 1, 3, GL_BYTE, true, 3, p);
+ uint32 param;
+ void* ptr;
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ 1, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &param));
+ EXPECT_EQ(123u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ 1, GL_VERTEX_ATTRIB_ARRAY_ENABLED, &param));
+ EXPECT_NE(0u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ 1, GL_VERTEX_ATTRIB_ARRAY_SIZE, &param));
+ EXPECT_EQ(3u, param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ 1, GL_VERTEX_ATTRIB_ARRAY_TYPE, &param));
+ EXPECT_EQ(static_cast<uint32>(GL_BYTE), param);
+ EXPECT_TRUE(manager_->GetVertexAttrib(
+ 1, GL_VERTEX_ATTRIB_ARRAY_NORMALIZED, &param));
+ EXPECT_NE(0u, param);
+ EXPECT_TRUE(manager_->GetAttribPointer(
+ 1, GL_VERTEX_ATTRIB_ARRAY_POINTER, &ptr));
+ EXPECT_EQ(p, ptr);
+
+ // Check that getting the divisor is passed to the service.
+ // This is because the divisor is an optional feature which
+ // only the service can validate.
+ EXPECT_FALSE(manager_->GetVertexAttrib(
+ 0, GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE, &param));
+}
+
+TEST_F(VertexArrayObjectManagerTest, HaveEnabledClientSideArrays) {
+ // Check turning on an array.
+ manager_->SetAttribEnable(1, true);
+ EXPECT_TRUE(manager_->HaveEnabledClientSideBuffers());
+ // Check turning off an array.
+ manager_->SetAttribEnable(1, false);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ // Check turning on an array and assigning a buffer.
+ manager_->SetAttribEnable(1, true);
+ manager_->SetAttribPointer(123, 1, 3, GL_BYTE, true, 3, NULL);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+ // Check unassigning a buffer.
+ manager_->SetAttribPointer(0, 1, 3, GL_BYTE, true, 3, NULL);
+ EXPECT_TRUE(manager_->HaveEnabledClientSideBuffers());
+ // Check disabling the array.
+ manager_->SetAttribEnable(1, false);
+ EXPECT_FALSE(manager_->HaveEnabledClientSideBuffers());
+}
+
+TEST_F(VertexArrayObjectManagerTest, BindElementArray) {
+ bool changed = false;
+ GLuint ids[2] = { 1, 3, };
+ manager_->GenVertexArrays(arraysize(ids), ids);
+
+ // Check the default element array is 0.
+ EXPECT_EQ(0u, manager_->bound_element_array_buffer());
+ // Check binding the same array does not need a service call.
+ EXPECT_FALSE(manager_->BindElementArray(0u));
+ // Check binding a new element array requires a service call.
+ EXPECT_TRUE(manager_->BindElementArray(55u));
+ // Check the element array was bound.
+ EXPECT_EQ(55u, manager_->bound_element_array_buffer());
+ // Check binding the same array does not need a service call.
+ EXPECT_FALSE(manager_->BindElementArray(55u));
+
+ // Check with a new vao.
+ EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
+ // Check the default element array is 0.
+ EXPECT_EQ(0u, manager_->bound_element_array_buffer());
+ // Check binding a new element array requires a service call.
+ EXPECT_TRUE(manager_->BindElementArray(11u));
+ // Check the element array was bound.
+ EXPECT_EQ(11u, manager_->bound_element_array_buffer());
+ // Check binding the same array does not need a service call.
+ EXPECT_FALSE(manager_->BindElementArray(11u));
+
+ // Check that switching vao bindings returns the correct element array.
+ EXPECT_TRUE(manager_->BindVertexArray(3, &changed));
+ EXPECT_EQ(0u, manager_->bound_element_array_buffer());
+ EXPECT_TRUE(manager_->BindVertexArray(0, &changed));
+ EXPECT_EQ(55u, manager_->bound_element_array_buffer());
+ EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
+ EXPECT_EQ(11u, manager_->bound_element_array_buffer());
+}
+
+TEST_F(VertexArrayObjectManagerTest, GenBindDelete) {
+ // Check unknown array fails.
+ bool changed = false;
+ EXPECT_FALSE(manager_->BindVertexArray(123, &changed));
+ EXPECT_FALSE(changed);
+
+ GLuint ids[2] = { 1, 3, };
+ manager_->GenVertexArrays(arraysize(ids), ids);
+ // Check Genned arrays succeed.
+ EXPECT_TRUE(manager_->BindVertexArray(1, &changed));
+ EXPECT_TRUE(changed);
+ EXPECT_TRUE(manager_->BindVertexArray(3, &changed));
+ EXPECT_TRUE(changed);
+
+ // Check binding the same array returns changed as false.
+ EXPECT_TRUE(manager_->BindVertexArray(3, &changed));
+ EXPECT_FALSE(changed);
+
+ // Check that deleted arrays fail to bind.
+ manager_->DeleteVertexArrays(2, ids);
+ EXPECT_FALSE(manager_->BindVertexArray(1, &changed));
+ EXPECT_FALSE(changed);
+ EXPECT_FALSE(manager_->BindVertexArray(3, &changed));
+ EXPECT_FALSE(changed);
+
+ // Check binding 0 returns changed as false since it's
+ // already bound.
+ EXPECT_TRUE(manager_->BindVertexArray(0, &changed));
+ EXPECT_FALSE(changed);
+}
+
+TEST_F(VertexArrayObjectManagerTest, IsReservedId) {
+ EXPECT_TRUE(manager_->IsReservedId(kClientSideArrayBuffer));
+ EXPECT_TRUE(manager_->IsReservedId(kClientSideElementArrayBuffer));
+ EXPECT_FALSE(manager_->IsReservedId(0));
+ EXPECT_FALSE(manager_->IsReservedId(1));
+ EXPECT_FALSE(manager_->IsReservedId(2));
+}
+
+} // namespace gles2
+} // namespace gpu
+